1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2017-2018 NXP
10 #include <rte_byteorder.h>
11 #include <rte_common.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_crypto.h>
14 #include <rte_cryptodev.h>
15 #include <rte_bus_vdev.h>
16 #include <rte_malloc.h>
17 #include <rte_security_driver.h>
18 #include <rte_hexdump.h>
20 #include <caam_jr_capabilities.h>
21 #include <caam_jr_config.h>
22 #include <caam_jr_hw_specific.h>
23 #include <caam_jr_pvt.h>
24 #include <caam_jr_desc.h>
25 #include <caam_jr_log.h>
27 /* RTA header files */
28 #include <hw/desc/common.h>
29 #include <hw/desc/algo.h>
33 #define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
34 static uint8_t cryptodev_driver_id;
37 enum rta_sec_era rta_sec_era;
39 /* Lists the states possible for the SEC user space driver. */
40 enum sec_driver_state_e {
41 SEC_DRIVER_STATE_IDLE, /* Driver not initialized */
42 SEC_DRIVER_STATE_STARTED, /* Driver initialized and can be used */
43 SEC_DRIVER_STATE_RELEASE, /* Driver release is in progress */
46 /* Job rings used for communication with SEC HW */
47 static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
49 /* The current state of SEC user space driver */
50 static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;
52 /* The number of job rings used by SEC user space driver */
53 static int g_job_rings_no;
54 static int g_job_rings_max;
56 struct sec_outring_entry {
57 phys_addr_t desc; /* Pointer to completed descriptor */
58 uint32_t status; /* Status for completed descriptor */
61 /* virtual to physical address conversion when mempool support is available for ctx */
62 static inline phys_addr_t
63 caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
65 PMD_INIT_FUNC_TRACE();
66 return (size_t)vaddr - ctx->vtop_offset;
70 caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
72 PMD_INIT_FUNC_TRACE();
73 /* report op status to sym->op and then free the ctx memory */
74 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
77 static inline struct caam_jr_op_ctx *
78 caam_jr_alloc_ctx(struct caam_jr_session *ses)
80 struct caam_jr_op_ctx *ctx;
83 PMD_INIT_FUNC_TRACE();
84 ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
86 CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
90 * Clear SG memory. There are 16 SG entries of 16 bytes each.
91 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
92 * to clear all the SG entries. Since caam_jr_alloc_ctx() is called for
93 * each packet, memset() would be costlier than dcbz_64().
95 dcbz_64(&ctx->sg[SG_CACHELINE_0]);
96 dcbz_64(&ctx->sg[SG_CACHELINE_1]);
97 dcbz_64(&ctx->sg[SG_CACHELINE_2]);
98 dcbz_64(&ctx->sg[SG_CACHELINE_3]);
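/*
 * For reference, a minimal sketch of the equivalent (but slower) clear,
 * assuming the 16 entries of 16 bytes span exactly these four 64-byte
 * cache lines:
 *
 *	memset(&ctx->sg[0], 0, 16 * 16);
 *
 * dcbz_64() zeroes a whole cache line without first fetching it, which
 * a byte-wise memset() cannot guarantee.
 */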
100 ctx->ctx_pool = ses->ctx_pool;
101 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
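/*
 * Sketch of the translation this offset enables (see caam_jr_vtop_ctx()
 * above): the ctx and every buffer embedded in it come from a single
 * mempool object, so one virtual-to-IOVA delta is valid for any field
 * inside it:
 *
 *	iova = (size_t)field_inside_ctx - ctx->vtop_offset;
 *
 * avoiding a full address translation per field on the hot path.
 */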
107 void caam_jr_stats_get(struct rte_cryptodev *dev,
108 struct rte_cryptodev_stats *stats)
110 struct caam_jr_qp **qp = (struct caam_jr_qp **)
111 dev->data->queue_pairs;
114 PMD_INIT_FUNC_TRACE();
116 CAAM_JR_ERR("Invalid stats ptr NULL");
119 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
121 CAAM_JR_WARN("Uninitialised queue pair");
125 stats->enqueued_count += qp[i]->tx_pkts;
126 stats->dequeued_count += qp[i]->rx_pkts;
127 stats->enqueue_err_count += qp[i]->tx_errs;
128 stats->dequeue_err_count += qp[i]->rx_errs;
129 CAAM_JR_INFO("extra stats:\n\tRX Poll ERR = %" PRIu64
130 "\n\tTX Ring Full = %" PRIu64,
132 qp[i]->tx_ring_full);
137 void caam_jr_stats_reset(struct rte_cryptodev *dev)
140 struct caam_jr_qp **qp = (struct caam_jr_qp **)
141 (dev->data->queue_pairs);
143 PMD_INIT_FUNC_TRACE();
144 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
146 CAAM_JR_WARN("Uninitialised queue pair");
151 qp[i]->rx_poll_err = 0;
154 qp[i]->tx_ring_full = 0;
159 is_cipher_only(struct caam_jr_session *ses)
161 PMD_INIT_FUNC_TRACE();
162 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
163 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
167 is_auth_only(struct caam_jr_session *ses)
169 PMD_INIT_FUNC_TRACE();
170 return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
171 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
175 is_aead(struct caam_jr_session *ses)
177 PMD_INIT_FUNC_TRACE();
178 return ((ses->cipher_alg == 0) &&
179 (ses->auth_alg == 0) &&
180 (ses->aead_alg != 0));
184 is_auth_cipher(struct caam_jr_session *ses)
186 PMD_INIT_FUNC_TRACE();
187 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
188 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
189 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
193 is_proto_ipsec(struct caam_jr_session *ses)
195 PMD_INIT_FUNC_TRACE();
196 return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
200 is_encode(struct caam_jr_session *ses)
202 PMD_INIT_FUNC_TRACE();
203 return ses->dir == DIR_ENC;
207 is_decode(struct caam_jr_session *ses)
209 PMD_INIT_FUNC_TRACE();
210 return ses->dir == DIR_DEC;
214 caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
216 PMD_INIT_FUNC_TRACE();
217 switch (ses->auth_alg) {
218 case RTE_CRYPTO_AUTH_NULL:
219 ses->digest_length = 0;
221 case RTE_CRYPTO_AUTH_MD5_HMAC:
223 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
224 OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
225 alginfo_a->algmode = OP_ALG_AAI_HMAC;
227 case RTE_CRYPTO_AUTH_SHA1_HMAC:
229 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
230 OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
231 alginfo_a->algmode = OP_ALG_AAI_HMAC;
233 case RTE_CRYPTO_AUTH_SHA224_HMAC:
235 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
236 OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
237 alginfo_a->algmode = OP_ALG_AAI_HMAC;
239 case RTE_CRYPTO_AUTH_SHA256_HMAC:
241 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
242 OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
243 alginfo_a->algmode = OP_ALG_AAI_HMAC;
245 case RTE_CRYPTO_AUTH_SHA384_HMAC:
247 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
248 OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
249 alginfo_a->algmode = OP_ALG_AAI_HMAC;
251 case RTE_CRYPTO_AUTH_SHA512_HMAC:
253 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
254 OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
255 alginfo_a->algmode = OP_ALG_AAI_HMAC;
258 CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
263 caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
265 PMD_INIT_FUNC_TRACE();
266 switch (ses->cipher_alg) {
267 case RTE_CRYPTO_CIPHER_NULL:
269 case RTE_CRYPTO_CIPHER_AES_CBC:
271 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
272 OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
273 alginfo_c->algmode = OP_ALG_AAI_CBC;
275 case RTE_CRYPTO_CIPHER_3DES_CBC:
277 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
278 OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
279 alginfo_c->algmode = OP_ALG_AAI_CBC;
281 case RTE_CRYPTO_CIPHER_AES_CTR:
283 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
284 OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
285 alginfo_c->algmode = OP_ALG_AAI_CTR;
288 CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
293 caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
295 PMD_INIT_FUNC_TRACE();
296 switch (ses->aead_alg) {
297 case RTE_CRYPTO_AEAD_AES_GCM:
298 alginfo->algtype = OP_ALG_ALGSEL_AES;
299 alginfo->algmode = OP_ALG_AAI_GCM;
302 CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
306 /* prepare the command block for the session */
308 caam_jr_prep_cdb(struct caam_jr_session *ses)
310 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
311 int32_t shared_desc_len = 0;
314 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
320 PMD_INIT_FUNC_TRACE();
322 caam_jr_dma_free(ses->cdb);
324 cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
326 CAAM_JR_ERR("failed to allocate memory for cdb\n");
332 memset(cdb, 0, sizeof(struct sec_cdb));
334 if (is_cipher_only(ses)) {
335 caam_cipher_alg(ses, &alginfo_c);
336 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
337 CAAM_JR_ERR("not supported cipher alg");
342 alginfo_c.key = (size_t)ses->cipher_key.data;
343 alginfo_c.keylen = ses->cipher_key.length;
344 alginfo_c.key_enc_flags = 0;
345 alginfo_c.key_type = RTA_DATA_IMM;
347 shared_desc_len = cnstr_shdsc_blkcipher(
353 } else if (is_auth_only(ses)) {
354 caam_auth_alg(ses, &alginfo_a);
355 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
356 CAAM_JR_ERR("not supported auth alg");
361 alginfo_a.key = (size_t)ses->auth_key.data;
362 alginfo_a.keylen = ses->auth_key.length;
363 alginfo_a.key_enc_flags = 0;
364 alginfo_a.key_type = RTA_DATA_IMM;
366 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
370 } else if (is_aead(ses)) {
371 caam_aead_alg(ses, &alginfo);
372 if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
373 CAAM_JR_ERR("not supported aead alg");
377 alginfo.key = (size_t)ses->aead_key.data;
378 alginfo.keylen = ses->aead_key.length;
379 alginfo.key_enc_flags = 0;
380 alginfo.key_type = RTA_DATA_IMM;
382 if (ses->dir == DIR_ENC)
383 shared_desc_len = cnstr_shdsc_gcm_encap(
384 cdb->sh_desc, true, swap,
389 shared_desc_len = cnstr_shdsc_gcm_decap(
390 cdb->sh_desc, true, swap,
395 caam_cipher_alg(ses, &alginfo_c);
396 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
397 CAAM_JR_ERR("not supported cipher alg");
402 alginfo_c.key = (size_t)ses->cipher_key.data;
403 alginfo_c.keylen = ses->cipher_key.length;
404 alginfo_c.key_enc_flags = 0;
405 alginfo_c.key_type = RTA_DATA_IMM;
407 caam_auth_alg(ses, &alginfo_a);
408 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
409 CAAM_JR_ERR("not supported auth alg");
414 alginfo_a.key = (size_t)ses->auth_key.data;
415 alginfo_a.keylen = ses->auth_key.length;
416 alginfo_a.key_enc_flags = 0;
417 alginfo_a.key_type = RTA_DATA_IMM;
419 cdb->sh_desc[0] = alginfo_c.keylen;
420 cdb->sh_desc[1] = alginfo_a.keylen;
421 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
423 (unsigned int *)cdb->sh_desc,
424 &cdb->sh_desc[2], 2);
427 CAAM_JR_ERR("Crypto: Incorrect key lengths");
431 if (cdb->sh_desc[2] & 1)
432 alginfo_c.key_type = RTA_DATA_IMM;
434 alginfo_c.key = (size_t)caam_jr_mem_vtop(
435 (void *)(size_t)alginfo_c.key);
436 alginfo_c.key_type = RTA_DATA_PTR;
438 if (cdb->sh_desc[2] & (1<<1))
439 alginfo_a.key_type = RTA_DATA_IMM;
441 alginfo_a.key = (size_t)caam_jr_mem_vtop(
442 (void *)(size_t)alginfo_a.key);
443 alginfo_a.key_type = RTA_DATA_PTR;
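/*
 * Interpretation sketch for the rta_inline_query() result consumed
 * above, assuming the flib convention that bit i of the returned word
 * says whether the i-th key (in the order passed) still fits inline:
 *
 *	uint32_t inl_mask = cdb->sh_desc[2];
 *	int cipher_key_inline = inl_mask & (1 << 0);
 *	int auth_key_inline   = inl_mask & (1 << 1);
 *
 * Keys that do not fit are referenced by physical pointer
 * (RTA_DATA_PTR) instead of being copied into the shared descriptor
 * (RTA_DATA_IMM).
 */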
448 if (is_proto_ipsec(ses)) {
449 if (ses->dir == DIR_ENC) {
450 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
452 true, swap, SHR_SERIAL,
454 (uint8_t *)&ses->ip4_hdr,
455 &alginfo_c, &alginfo_a);
456 } else if (ses->dir == DIR_DEC) {
457 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
459 true, swap, SHR_SERIAL,
461 &alginfo_c, &alginfo_a);
464 /* Auth_only_len is set to 0 here and will be
465 * overwritten in the fd for each packet.
467 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
468 true, swap, &alginfo_c, &alginfo_a,
470 ses->digest_length, ses->dir);
474 if (shared_desc_len < 0) {
475 CAAM_JR_ERR("error in preparing command block");
476 return shared_desc_len;
480 SEC_DUMP_DESC(cdb->sh_desc);
483 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
488 /* @brief Poll the HW for already processed jobs in the JR
489 * and silently discard the available jobs or notify them to UA
490 * with indicated error code.
492 * @param [in,out] job_ring The job ring to poll.
493 * @param [in] do_notify Can be #TRUE or #FALSE. Indicates if
494 * descriptors are to be discarded
495 * or notified to UA with given error_code.
496 * @param [out] notified_descs Number of notified descriptors. Can be NULL
497 * if do_notify is #FALSE
500 hw_flush_job_ring(struct sec_job_ring_t *job_ring,
502 uint32_t *notified_descs)
504 int32_t jobs_no_to_discard = 0;
505 int32_t discarded_descs_no = 0;
507 PMD_INIT_FUNC_TRACE();
508 CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
509 job_ring, job_ring->pidx, job_ring->cidx, do_notify);
511 jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);
513 /* Discard all jobs */
514 CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
515 job_ring, job_ring->pidx, job_ring->cidx,
518 while (jobs_no_to_discard > discarded_descs_no) {
519 discarded_descs_no++;
520 /* Now increment the consumer index for the current job ring,
521 * AFTER the job has been saved in a temporary location!
524 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
527 hw_remove_entries(job_ring, 1);
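/*
 * Index arithmetic sketch, assuming SEC_CIRCULAR_COUNTER is the usual
 * power-of-two ring increment:
 *
 *	next = (idx + 1) & (SEC_JOB_RING_SIZE - 1);
 *
 * so cidx wraps around the ring without a modulo or a branch.
 */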
530 if (do_notify == true) {
531 ASSERT(notified_descs != NULL);
532 *notified_descs = discarded_descs_no;
536 /* @brief Poll the HW for already processed jobs in the JR
537 * and notify the available jobs to UA.
539 * @param [in] job_ring The job ring to poll.
540 * @param [in] limit The maximum number of jobs to notify.
541 * If set to a negative value, all available jobs are notified.
544 * @retval >=0 Number of jobs notified to UA.
545 * @retval -1 for error
548 hw_poll_job_ring(struct sec_job_ring_t *job_ring,
549 struct rte_crypto_op **ops, int32_t limit,
550 struct caam_jr_qp *jr_qp)
552 int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify */
553 int32_t number_of_jobs_available = 0;
554 int32_t notified_descs_no = 0;
555 uint32_t sec_error_code = 0;
556 struct job_descriptor *current_desc;
557 phys_addr_t current_desc_addr;
558 phys_addr_t *temp_addr;
559 struct caam_jr_op_ctx *ctx;
561 PMD_INIT_FUNC_TRACE();
562 /* TODO: check that ops have memory */
563 /* check here if any JR error that cannot be written
564 * in the output status word has occurred
566 if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
567 CAAM_JR_INFO("err received");
568 sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
569 GET_JR_REG(JRINT, job_ring));
570 if (unlikely(sec_error_code)) {
571 hw_job_ring_error_print(job_ring, sec_error_code);
575 /* compute the number of jobs available in the job ring based on the
576 * producer and consumer index values.
578 number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
579 /* Compute the number of notifications that need to be raised to UA
580 * If limit > total number of done jobs -> notify all done jobs
581 * If limit = 0 -> error
582 * If limit < total number of done jobs -> notify a number
583 * of done jobs equal with limit
585 jobs_no_to_notify = (limit > number_of_jobs_available) ?
586 number_of_jobs_available : limit;
588 "Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
589 job_ring, job_ring->pidx, job_ring->cidx,
590 limit, number_of_jobs_available, jobs_no_to_notify);
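/*
 * Worked example of the clamp above: with limit = 16 and 9 finished
 * jobs, jobs_no_to_notify = 9; with limit = 4 and 9 finished jobs,
 * only 4 are popped now and the remaining 5 wait for the next poll.
 */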
594 while (jobs_no_to_notify > notified_descs_no) {
595 static uint64_t false_alarm;
596 static uint64_t real_poll;
598 /* Get job status here */
599 sec_error_code = job_ring->output_ring[job_ring->cidx].status;
600 /* Get completed descriptor */
601 temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
602 current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);
605 /* TODO: check if this is a false alarm (no desc present) */
606 if (!current_desc_addr) {
608 printf("false alarm %" PRIu64 "real %" PRIu64
609 " sec_err =0x%x cidx Index =0%d\n",
610 false_alarm, real_poll,
611 sec_error_code, job_ring->cidx);
612 rte_panic("CAAM JR descriptor NULL");
613 return notified_descs_no;
615 current_desc = (struct job_descriptor *)
616 caam_jr_dma_ptov(current_desc_addr);
617 /* now increment the consumer index for the current job ring,
618 * AFTER saving job in temporary location!
620 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
622 /* Signal that the job has been processed and the slot is free */
623 hw_remove_entries(job_ring, 1);
624 /* TODO: for multiple ops, packets */
625 ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
626 if (unlikely(sec_error_code)) {
627 CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n",
628 job_ring->cidx, sec_error_code);
629 hw_handle_job_ring_error(job_ring, sec_error_code);
630 /* TODO: improve with exact errors */
631 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
634 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
636 if (ctx->op->sym->m_dst) {
637 rte_hexdump(stdout, "PROCESSED",
638 rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
639 rte_pktmbuf_data_len(ctx->op->sym->m_dst));
641 rte_hexdump(stdout, "PROCESSED",
642 rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
643 rte_pktmbuf_data_len(ctx->op->sym->m_src));
647 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
650 if (ctx->op->sym->m_dst) {
651 /* TODO: check for IP header or other */
652 ip4_hdr = (struct ip *)
653 rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*);
654 ctx->op->sym->m_dst->pkt_len =
655 rte_be_to_cpu_16(ip4_hdr->ip_len);
656 ctx->op->sym->m_dst->data_len =
657 rte_be_to_cpu_16(ip4_hdr->ip_len);
659 ip4_hdr = (struct ip *)
660 rte_pktmbuf_mtod(ctx->op->sym->m_src, char*);
661 ctx->op->sym->m_src->pkt_len =
662 rte_be_to_cpu_16(ip4_hdr->ip_len);
663 ctx->op->sym->m_src->data_len =
664 rte_be_to_cpu_16(ip4_hdr->ip_len);
668 caam_jr_op_ending(ctx);
672 return notified_descs_no;
676 caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
679 struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
680 struct sec_job_ring_t *ring = jr_qp->ring;
684 PMD_INIT_FUNC_TRACE();
685 CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);
688 * If nb_ops < 0 -> poll JR until no more notifications are available.
689 * If nb_ops > 0 -> poll JR until limit is reached.
692 /* Run hw poll job ring */
693 num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
695 CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
699 CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);
701 if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
702 if (num_rx < nb_ops) {
703 ret = caam_jr_enable_irqs(ring->irq_fd);
704 SEC_ASSERT(ret == 0, ret,
705 "Failed to enable irqs for job ring %p", ring);
707 } else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
709 /* Always enable IRQ generation when in pure IRQ mode */
710 ret = caam_jr_enable_irqs(ring->irq_fd);
711 SEC_ASSERT(ret == 0, ret,
712 "Failed to enable irqs for job ring %p", ring);
715 jr_qp->rx_pkts += num_rx;
722 * |<----data_len------->|
723 * |ip_header|ah_header|icv|payload|
728 static inline struct caam_jr_op_ctx *
729 build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
731 struct rte_crypto_sym_op *sym = op->sym;
732 struct rte_mbuf *mbuf = sym->m_src;
733 struct caam_jr_op_ctx *ctx;
734 struct sec4_sg_entry *sg;
737 uint64_t sdesc_offset;
738 struct sec_job_descriptor_t *jobdescr;
741 PMD_INIT_FUNC_TRACE();
747 if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
748 CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d",
753 ctx = caam_jr_alloc_ctx(ses);
760 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
762 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
764 SEC_JD_INIT(jobdescr);
765 SEC_JD_SET_SD(jobdescr,
766 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
767 cdb->sh_hdr.hi.field.idlen);
770 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
771 0, ses->digest_length);
775 length = sym->auth.data.length;
776 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) + sym->auth.data.offset);
777 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
779 /* Successive segs */
783 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
784 sg->len = cpu_to_caam32(mbuf->data_len);
788 if (is_decode(ses)) {
789 /* digest verification case */
791 /* hash result or digest, save digest first */
792 rte_memcpy(ctx->digest, sym->auth.digest.data,
795 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
797 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
798 sg->len = cpu_to_caam32(ses->digest_length);
799 length += ses->digest_length;
801 length -= ses->digest_length;
805 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
807 SEC_JD_SET_IN_PTR(jobdescr,
808 (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, length);
809 /* enabling sg list */
810 (jobdescr)->seq_in.command.word |= 0x01000000;
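/*
 * The constant is bit 24 of the SEQ IN PTR command word; in RTA terms
 * this is assumed to be equivalent to:
 *
 *	jobdescr->seq_in.command.word |= SQIN_SGF;
 *
 * i.e. the in-pointer references a table of sec4_sg_entry rather than
 * a contiguous buffer. The same 0x01000000 pattern on seq_out in the
 * SG paths below is the matching flag for SEQ OUT PTR.
 */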
815 static inline struct caam_jr_op_ctx *
816 build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
818 struct rte_crypto_sym_op *sym = op->sym;
819 struct caam_jr_op_ctx *ctx;
820 struct sec4_sg_entry *sg;
821 rte_iova_t start_addr;
823 uint64_t sdesc_offset;
824 struct sec_job_descriptor_t *jobdescr;
826 PMD_INIT_FUNC_TRACE();
827 ctx = caam_jr_alloc_ctx(ses);
834 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
836 start_addr = rte_pktmbuf_iova(sym->m_src);
838 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
840 SEC_JD_INIT(jobdescr);
841 SEC_JD_SET_SD(jobdescr,
842 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
843 cdb->sh_hdr.hi.field.idlen);
846 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
847 0, ses->digest_length);
850 if (is_decode(ses)) {
852 SEC_JD_SET_IN_PTR(jobdescr,
853 (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
854 (sym->auth.data.length + ses->digest_length));
855 /* enabling sg list */
856 (jobdescr)->seq_in.command.word |= 0x01000000;
858 /* hash result or digest, save digest first */
859 rte_memcpy(ctx->digest, sym->auth.digest.data,
861 sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
862 sg->len = cpu_to_caam32(sym->auth.data.length);
865 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
867 /* let the HW verify the digest */
869 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
870 sg->len = cpu_to_caam32(ses->digest_length);
872 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
874 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
875 sym->auth.data.offset, sym->auth.data.length);
880 static inline struct caam_jr_op_ctx *
881 build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
883 struct rte_crypto_sym_op *sym = op->sym;
884 struct rte_mbuf *mbuf = sym->m_src;
885 struct caam_jr_op_ctx *ctx;
886 struct sec4_sg_entry *sg, *in_sg;
889 uint64_t sdesc_offset;
890 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
892 struct sec_job_descriptor_t *jobdescr;
895 PMD_INIT_FUNC_TRACE();
898 reg_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
901 reg_segs = mbuf->nb_segs * 2 + 2;
904 if (reg_segs > MAX_SG_ENTRIES) {
905 CAAM_JR_DP_ERR("Cipher: Max sec segs supported is %d",
910 ctx = caam_jr_alloc_ctx(ses);
916 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
918 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
920 SEC_JD_INIT(jobdescr);
921 SEC_JD_SET_SD(jobdescr,
922 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
923 cdb->sh_hdr.hi.field.idlen);
926 CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
927 sym->m_src->data_off, sym->cipher.data.offset,
928 sym->cipher.data.length, ses->iv.length);
937 length = sym->cipher.data.length;
939 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
940 + sym->cipher.data.offset);
941 sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
943 /* Successive segs */
947 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
948 sg->len = cpu_to_caam32(mbuf->data_len);
952 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
954 SEC_JD_SET_OUT_PTR(jobdescr,
955 (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0,
958 (jobdescr)->seq_out.command.word |= 0x01000000;
965 length = sym->cipher.data.length + ses->iv.length;
968 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
969 sg->len = cpu_to_caam32(ses->iv.length);
973 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
974 + sym->cipher.data.offset);
975 sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
977 /* Successive segs */
981 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
982 sg->len = cpu_to_caam32(mbuf->data_len);
986 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
989 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, in_sg), 0,
992 (jobdescr)->seq_in.command.word |= 0x01000000;
997 static inline struct caam_jr_op_ctx *
998 build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
1000 struct rte_crypto_sym_op *sym = op->sym;
1001 struct caam_jr_op_ctx *ctx;
1002 struct sec4_sg_entry *sg;
1003 rte_iova_t src_start_addr, dst_start_addr;
1004 struct sec_cdb *cdb;
1005 uint64_t sdesc_offset;
1006 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1008 struct sec_job_descriptor_t *jobdescr;
1010 PMD_INIT_FUNC_TRACE();
1011 ctx = caam_jr_alloc_ctx(ses);
1017 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1019 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1021 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1023 dst_start_addr = src_start_addr;
1025 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1027 SEC_JD_INIT(jobdescr);
1028 SEC_JD_SET_SD(jobdescr,
1029 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1030 cdb->sh_hdr.hi.field.idlen);
1033 CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
1034 sym->m_src->data_off, sym->cipher.data.offset,
1035 sym->cipher.data.length, ses->iv.length);
1038 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
1039 sym->cipher.data.offset,
1040 sym->cipher.data.length + ses->iv.length);
1044 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
1045 sym->cipher.data.length + ses->iv.length);
1046 /* enabling sg bit */
1047 (jobdescr)->seq_in.command.word |= 0x01000000;
1049 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1050 sg->len = cpu_to_caam32(ses->iv.length);
1053 sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
1054 sg->len = cpu_to_caam32(sym->cipher.data.length);
1056 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1061 /* For decapsulation:
1063 * +----+----------------+--------------------------------+-----+
1064 * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
1065 * +----+----------------+--------------------------------+-----+
1067 * +--------------------------------+
1068 * | Decrypted & authenticated data |
1069 * +--------------------------------+
1072 static inline struct caam_jr_op_ctx *
1073 build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
1075 struct rte_crypto_sym_op *sym = op->sym;
1076 struct caam_jr_op_ctx *ctx;
1077 struct sec4_sg_entry *sg, *out_sg, *in_sg;
1078 struct rte_mbuf *mbuf;
1079 uint32_t length = 0;
1080 struct sec_cdb *cdb;
1081 uint64_t sdesc_offset;
1083 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1085 struct sec_job_descriptor_t *jobdescr;
1086 uint32_t auth_only_len;
1088 PMD_INIT_FUNC_TRACE();
1089 auth_only_len = op->sym->auth.data.length -
1090 op->sym->cipher.data.length;
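/*
 * Worked example of this split for an ESP-like chain: if auth.data
 * covers an 8-byte auth-only header plus a 64-byte payload (72 bytes)
 * and cipher.data covers only the 64-byte payload, then
 * auth_only_len = 72 - 64 = 8. The value is pushed to the descriptor
 * via DPOVRD at the end of this function.
 */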
1094 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1097 req_segs = mbuf->nb_segs * 2 + 3;
1100 if (req_segs > MAX_SG_ENTRIES) {
1101 CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1106 ctx = caam_jr_alloc_ctx(ses);
1112 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1114 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1116 SEC_JD_INIT(jobdescr);
1117 SEC_JD_SET_SD(jobdescr,
1118 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1119 cdb->sh_hdr.hi.field.idlen);
1127 out_sg = &ctx->sg[0];
1129 length = sym->auth.data.length + ses->digest_length;
1131 length = sym->auth.data.length;
1136 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
1137 + sym->auth.data.offset);
1138 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
1140 /* Successive segs */
1144 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
1145 sg->len = cpu_to_caam32(mbuf->data_len);
1149 if (is_encode(ses)) {
1150 /* set auth output */
1152 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
1153 sg->len = cpu_to_caam32(ses->digest_length);
1156 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1158 SEC_JD_SET_OUT_PTR(jobdescr,
1159 (uint64_t)caam_jr_dma_vtop(out_sg), 0, length);
1161 (jobdescr)->seq_out.command.word |= 0x01000000;
1168 length = ses->iv.length + sym->auth.data.length;
1170 length = ses->iv.length + sym->auth.data.length
1171 + ses->digest_length;
1173 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1174 sg->len = cpu_to_caam32(ses->iv.length);
1178 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
1179 + sym->auth.data.offset);
1180 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
1182 /* Successive segs */
1186 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
1187 sg->len = cpu_to_caam32(mbuf->data_len);
1191 if (is_decode(ses)) {
1193 rte_memcpy(ctx->digest, sym->auth.digest.data,
1194 ses->digest_length);
1195 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
1196 sg->len = cpu_to_caam32(ses->digest_length);
1199 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1201 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0,
1204 (jobdescr)->seq_in.command.word |= 0x01000000;
1205 /* Auth_only_len is set to 0 in the descriptor and is
1206 * overwritten here in the jd, which will update
1211 (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
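/*
 * DPOVRD sketch: bit 31 marks the word as a valid override and the
 * low bits carry the per-packet auth-only length computed above,
 * roughly:
 *
 *	dpovrd = (1U << 31) | auth_only_len;
 *
 * letting one shared descriptor serve packets with differing
 * auth-only regions.
 */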
1216 static inline struct caam_jr_op_ctx *
1217 build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
1219 struct rte_crypto_sym_op *sym = op->sym;
1220 struct caam_jr_op_ctx *ctx;
1221 struct sec4_sg_entry *sg;
1222 rte_iova_t src_start_addr, dst_start_addr;
1223 uint32_t length = 0;
1224 struct sec_cdb *cdb;
1225 uint64_t sdesc_offset;
1226 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1228 struct sec_job_descriptor_t *jobdescr;
1229 uint32_t auth_only_len;
1231 PMD_INIT_FUNC_TRACE();
1232 auth_only_len = op->sym->auth.data.length -
1233 op->sym->cipher.data.length;
1235 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1237 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1239 dst_start_addr = src_start_addr;
1241 ctx = caam_jr_alloc_ctx(ses);
1247 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1249 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1251 SEC_JD_INIT(jobdescr);
1252 SEC_JD_SET_SD(jobdescr,
1253 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1254 cdb->sh_hdr.hi.field.idlen);
1258 if (is_encode(ses)) {
1259 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1260 sg->len = cpu_to_caam32(ses->iv.length);
1261 length += ses->iv.length;
1264 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
1265 sg->len = cpu_to_caam32(sym->auth.data.length);
1266 length += sym->auth.data.length;
1268 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1270 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1271 sg->len = cpu_to_caam32(ses->iv.length);
1272 length += ses->iv.length;
1275 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
1276 sg->len = cpu_to_caam32(sym->auth.data.length);
1277 length += sym->auth.data.length;
1279 rte_memcpy(ctx->digest, sym->auth.digest.data,
1280 ses->digest_length);
1282 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
1283 sg->len = cpu_to_caam32(ses->digest_length);
1284 length += ses->digest_length;
1286 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1289 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
1292 (jobdescr)->seq_in.command.word |= 0x01000000;
1297 sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
1298 sg->len = cpu_to_caam32(sym->cipher.data.length);
1299 length = sym->cipher.data.length;
1301 if (is_encode(ses)) {
1302 /* set auth output */
1304 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
1305 sg->len = cpu_to_caam32(ses->digest_length);
1306 length += ses->digest_length;
1309 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1311 SEC_JD_SET_OUT_PTR(jobdescr,
1312 (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
1314 (jobdescr)->seq_out.command.word |= 0x01000000;
1316 /* Auth_only_len is set to 0 in the descriptor and is
1317 * overwritten here in the jd, which will update
1322 (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
1327 static inline struct caam_jr_op_ctx *
1328 build_proto(struct rte_crypto_op *op, struct caam_jr_session *ses)
1330 struct rte_crypto_sym_op *sym = op->sym;
1331 struct caam_jr_op_ctx *ctx = NULL;
1332 phys_addr_t src_start_addr, dst_start_addr;
1333 struct sec_cdb *cdb;
1334 uint64_t sdesc_offset;
1335 struct sec_job_descriptor_t *jobdescr;
1337 PMD_INIT_FUNC_TRACE();
1338 ctx = caam_jr_alloc_ctx(ses);
1343 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1345 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1347 dst_start_addr = src_start_addr;
1350 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1352 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1354 SEC_JD_INIT(jobdescr);
1355 SEC_JD_SET_SD(jobdescr,
1356 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1357 cdb->sh_hdr.hi.field.idlen);
1360 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 0,
1361 sym->m_src->buf_len - sym->m_src->data_off);
1363 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)src_start_addr, 0,
1364 sym->m_src->pkt_len);
1365 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1371 caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
1373 struct sec_job_ring_t *ring = qp->ring;
1374 struct caam_jr_session *ses;
1375 struct caam_jr_op_ctx *ctx = NULL;
1376 struct sec_job_descriptor_t *jobdescr __rte_unused;
1378 PMD_INIT_FUNC_TRACE();
1379 switch (op->sess_type) {
1380 case RTE_CRYPTO_OP_WITH_SESSION:
1381 ses = (struct caam_jr_session *)
1382 get_sym_session_private_data(op->sym->session,
1383 cryptodev_driver_id);
1385 case RTE_CRYPTO_OP_SECURITY_SESSION:
1386 ses = (struct caam_jr_session *)
1387 get_sec_session_private_data(
1388 op->sym->sec_session);
1391 CAAM_JR_DP_ERR("sessionless crypto op not supported");
1396 if (unlikely(!ses->qp || ses->qp != qp)) {
1397 CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp);
1399 caam_jr_prep_cdb(ses);
1402 if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1403 if (is_auth_cipher(ses))
1404 ctx = build_cipher_auth(op, ses);
1405 else if (is_aead(ses))
1407 else if (is_auth_only(ses))
1408 ctx = build_auth_only(op, ses);
1409 else if (is_cipher_only(ses))
1410 ctx = build_cipher_only(op, ses);
1411 else if (is_proto_ipsec(ses))
1412 ctx = build_proto(op, ses);
1414 if (is_auth_cipher(ses))
1415 ctx = build_cipher_auth_sg(op, ses);
1416 else if (is_aead(ses))
1418 else if (is_auth_only(ses))
1419 ctx = build_auth_only_sg(op, ses);
1420 else if (is_cipher_only(ses))
1421 ctx = build_cipher_only_sg(op, ses);
1424 if (unlikely(!ctx)) {
1426 CAAM_JR_ERR("not supported sec op");
1431 rte_hexdump(stdout, "DECODE",
1432 rte_pktmbuf_mtod(op->sym->m_src, void *),
1433 rte_pktmbuf_data_len(op->sym->m_src));
1435 rte_hexdump(stdout, "ENCODE",
1436 rte_pktmbuf_mtod(op->sym->m_src, void *),
1437 rte_pktmbuf_data_len(op->sym->m_src));
1439 printf("\n JD before conversion\n");
1440 for (int i = 0; i < 12; i++)
1441 printf("\n 0x%08x", ctx->jobdes.desc[i]);
1444 CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
1445 ring, ring->pidx, ring->cidx);
1447 /* TODO: do we want to retry? */
1448 if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
1449 SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
1450 CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
1451 ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
1452 caam_jr_op_ending(ctx);
1457 #if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
1458 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1460 jobdescr->deschdr.command.word =
1461 cpu_to_caam32(jobdescr->deschdr.command.word);
1462 jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
1463 jobdescr->seq_out.command.word =
1464 cpu_to_caam32(jobdescr->seq_out.command.word);
1465 jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
1466 jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
1467 jobdescr->seq_in.command.word =
1468 cpu_to_caam32(jobdescr->seq_in.command.word);
1469 jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
1470 jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
1471 jobdescr->load_dpovrd.command.word =
1472 cpu_to_caam32(jobdescr->load_dpovrd.command.word);
1473 jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
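/*
 * Conversion sketch: when core and CAAM endianness differ, each field
 * the hardware parses is swapped exactly once, here at enqueue time.
 * Assuming a little-endian core with a big-endian CAAM, the helpers
 * reduce to:
 *
 *	cpu_to_caam32(x) == rte_cpu_to_be_32(x)
 *	cpu_to_caam64(x) == rte_cpu_to_be_64(x)
 *
 * Pointer fields take the 64-bit variant, command words the 32-bit one.
 */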
1476 /* Set ptr in input ring to current descriptor */
1477 sec_write_addr(&ring->input_ring[ring->pidx],
1478 (phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
1481 /* Notify HW that a new job is enqueued */
1482 hw_enqueue_desc_on_job_ring(ring);
1484 /* increment the producer index for the current job ring */
1485 ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);
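/*
 * Ordering sketch for the enqueue above, assuming the standard CAAM
 * job ring protocol:
 *
 *	input_ring[pidx] = desc_iova;    step 1: publish descriptor
 *	hw_enqueue_desc_on_job_ring();   step 2: tell SEC one job was added
 *	pidx = next(pidx);               step 3: local bookkeeping only
 *
 * The hardware must never observe step 2 before step 1 is visible in
 * memory.
 */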
1491 caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1494 /* Function to transmit the frames to the given device and queue pair */
1497 struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
1498 uint16_t num_tx = 0;
1500 PMD_INIT_FUNC_TRACE();
1501 /* Prepare each packet which is to be sent */
1502 for (loop = 0; loop < nb_ops; loop++) {
1503 ret = caam_jr_enqueue_op(ops[loop], jr_qp);
1508 jr_qp->tx_pkts += num_tx;
1513 /* Release queue pair */
1515 caam_jr_queue_pair_release(struct rte_cryptodev *dev,
1518 struct sec_job_ring_t *internals;
1519 struct caam_jr_qp *qp = NULL;
1521 PMD_INIT_FUNC_TRACE();
1522 CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);
1524 internals = dev->data->dev_private;
1525 if (qp_id >= internals->max_nb_queue_pairs) {
1526 CAAM_JR_ERR("Max supported qpid %d",
1527 internals->max_nb_queue_pairs);
1531 qp = &internals->qps[qp_id];
1533 dev->data->queue_pairs[qp_id] = NULL;
1538 /* Setup a queue pair */
1540 caam_jr_queue_pair_setup(
1541 struct rte_cryptodev *dev, uint16_t qp_id,
1542 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1543 __rte_unused int socket_id)
1545 struct sec_job_ring_t *internals;
1546 struct caam_jr_qp *qp = NULL;
1548 PMD_INIT_FUNC_TRACE();
1549 CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1551 internals = dev->data->dev_private;
1552 if (qp_id >= internals->max_nb_queue_pairs) {
1553 CAAM_JR_ERR("Max supported qpid %d",
1554 internals->max_nb_queue_pairs);
1558 qp = &internals->qps[qp_id];
1559 qp->ring = internals;
1560 dev->data->queue_pairs[qp_id] = qp;
1565 /* Return the number of allocated queue pairs */
1567 caam_jr_queue_pair_count(struct rte_cryptodev *dev)
1569 PMD_INIT_FUNC_TRACE();
1571 return dev->data->nb_queue_pairs;
1574 /* Returns the size of the caam_jr session structure */
1576 caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1578 PMD_INIT_FUNC_TRACE();
1580 return sizeof(struct caam_jr_session);
1584 caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
1585 struct rte_crypto_sym_xform *xform,
1586 struct caam_jr_session *session)
1588 PMD_INIT_FUNC_TRACE();
1589 session->cipher_alg = xform->cipher.algo;
1590 session->iv.length = xform->cipher.iv.length;
1591 session->iv.offset = xform->cipher.iv.offset;
1592 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1593 RTE_CACHE_LINE_SIZE);
1594 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1595 CAAM_JR_ERR("No Memory for cipher key\n");
1598 session->cipher_key.length = xform->cipher.key.length;
1600 memcpy(session->cipher_key.data, xform->cipher.key.data,
1601 xform->cipher.key.length);
1602 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1609 caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
1610 struct rte_crypto_sym_xform *xform,
1611 struct caam_jr_session *session)
1613 PMD_INIT_FUNC_TRACE();
1614 session->auth_alg = xform->auth.algo;
1615 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1616 RTE_CACHE_LINE_SIZE);
1617 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1618 CAAM_JR_ERR("No Memory for auth key\n");
1621 session->auth_key.length = xform->auth.key.length;
1622 session->digest_length = xform->auth.digest_length;
1624 memcpy(session->auth_key.data, xform->auth.key.data,
1625 xform->auth.key.length);
1626 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1633 caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
1634 struct rte_crypto_sym_xform *xform,
1635 struct caam_jr_session *session)
1637 PMD_INIT_FUNC_TRACE();
1638 session->aead_alg = xform->aead.algo;
1639 session->iv.length = xform->aead.iv.length;
1640 session->iv.offset = xform->aead.iv.offset;
1641 session->auth_only_len = xform->aead.aad_length;
1642 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1643 RTE_CACHE_LINE_SIZE);
1644 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1645 CAAM_JR_ERR("No Memory for aead key\n");
1648 session->aead_key.length = xform->aead.key.length;
1649 session->digest_length = xform->aead.digest_length;
1651 memcpy(session->aead_key.data, xform->aead.key.data,
1652 xform->aead.key.length);
1653 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1660 caam_jr_set_session_parameters(struct rte_cryptodev *dev,
1661 struct rte_crypto_sym_xform *xform, void *sess)
1663 struct sec_job_ring_t *internals = dev->data->dev_private;
1664 struct caam_jr_session *session = sess;
1666 PMD_INIT_FUNC_TRACE();
1668 if (unlikely(sess == NULL)) {
1669 CAAM_JR_ERR("invalid session struct");
1673 /* Default IV length = 0 */
1674 session->iv.length = 0;
1677 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1678 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1679 caam_jr_cipher_init(dev, xform, session);
1681 /* Authentication Only */
1682 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1683 xform->next == NULL) {
1684 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1685 caam_jr_auth_init(dev, xform, session);
1687 /* Cipher then Authenticate */
1688 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1689 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1690 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1691 caam_jr_cipher_init(dev, xform, session);
1692 caam_jr_auth_init(dev, xform->next, session);
1694 CAAM_JR_ERR("Not supported: Auth then Cipher");
1698 /* Authenticate then Cipher */
1699 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1700 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1701 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1702 caam_jr_auth_init(dev, xform, session);
1703 caam_jr_cipher_init(dev, xform->next, session);
1705 CAAM_JR_ERR("Not supported: Auth then Cipher");
1709 /* AEAD operation for AES-GCM kind of Algorithms */
1710 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1711 xform->next == NULL) {
1712 caam_jr_aead_init(dev, xform, session);
1715 CAAM_JR_ERR("Invalid crypto type");
1718 session->ctx_pool = internals->ctx_pool;
1723 rte_free(session->cipher_key.data);
1724 rte_free(session->auth_key.data);
1725 memset(session, 0, sizeof(struct caam_jr_session));
1731 caam_jr_sym_session_configure(struct rte_cryptodev *dev,
1732 struct rte_crypto_sym_xform *xform,
1733 struct rte_cryptodev_sym_session *sess,
1734 struct rte_mempool *mempool)
1736 void *sess_private_data;
1739 PMD_INIT_FUNC_TRACE();
1741 if (rte_mempool_get(mempool, &sess_private_data)) {
1742 CAAM_JR_ERR("Couldn't get object from session mempool");
1746 memset(sess_private_data, 0, sizeof(struct caam_jr_session));
1747 ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
1749 CAAM_JR_ERR("failed to configure session parameters");
1750 /* Return session to mempool */
1751 rte_mempool_put(mempool, sess_private_data);
1755 set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
1760 /* Clear the memory of session so it doesn't leave key material behind */
1762 caam_jr_sym_session_clear(struct rte_cryptodev *dev,
1763 struct rte_cryptodev_sym_session *sess)
1765 uint8_t index = dev->driver_id;
1766 void *sess_priv = get_sym_session_private_data(sess, index);
1767 struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1769 PMD_INIT_FUNC_TRACE();
1772 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1774 rte_free(s->cipher_key.data);
1775 rte_free(s->auth_key.data);
1776 memset(s, 0, sizeof(struct caam_jr_session));
1777 set_sym_session_private_data(sess, index, NULL);
1778 rte_mempool_put(sess_mp, sess_priv);
1783 caam_jr_set_ipsec_session(struct rte_cryptodev *dev,
1784 struct rte_security_session_conf *conf,
1787 struct sec_job_ring_t *internals = dev->data->dev_private;
1788 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1789 struct rte_crypto_auth_xform *auth_xform;
1790 struct rte_crypto_cipher_xform *cipher_xform;
1791 struct caam_jr_session *session = (struct caam_jr_session *)sess;
1793 PMD_INIT_FUNC_TRACE();
1795 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1796 cipher_xform = &conf->crypto_xform->cipher;
1797 auth_xform = &conf->crypto_xform->next->auth;
1799 auth_xform = &conf->crypto_xform->auth;
1800 cipher_xform = &conf->crypto_xform->next->cipher;
1802 session->proto_alg = conf->protocol;
1803 session->cipher_key.data = rte_zmalloc(NULL,
1804 cipher_xform->key.length,
1805 RTE_CACHE_LINE_SIZE);
1806 if (session->cipher_key.data == NULL &&
1807 cipher_xform->key.length > 0) {
1808 CAAM_JR_ERR("No Memory for cipher key\n");
1812 session->cipher_key.length = cipher_xform->key.length;
1813 session->auth_key.data = rte_zmalloc(NULL,
1814 auth_xform->key.length,
1815 RTE_CACHE_LINE_SIZE);
1816 if (session->auth_key.data == NULL &&
1817 auth_xform->key.length > 0) {
1818 CAAM_JR_ERR("No Memory for auth key\n");
1819 rte_free(session->cipher_key.data);
1822 session->auth_key.length = auth_xform->key.length;
1823 memcpy(session->cipher_key.data, cipher_xform->key.data,
1824 cipher_xform->key.length);
1825 memcpy(session->auth_key.data, auth_xform->key.data,
1826 auth_xform->key.length);
1828 switch (auth_xform->algo) {
1829 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1830 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1832 case RTE_CRYPTO_AUTH_MD5_HMAC:
1833 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1835 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1836 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1838 case RTE_CRYPTO_AUTH_SHA384_HMAC:
1839 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1841 case RTE_CRYPTO_AUTH_SHA512_HMAC:
1842 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1844 case RTE_CRYPTO_AUTH_AES_CMAC:
1845 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1847 case RTE_CRYPTO_AUTH_NULL:
1848 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1850 case RTE_CRYPTO_AUTH_SHA224_HMAC:
1851 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1852 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1853 case RTE_CRYPTO_AUTH_SHA1:
1854 case RTE_CRYPTO_AUTH_SHA256:
1855 case RTE_CRYPTO_AUTH_SHA512:
1856 case RTE_CRYPTO_AUTH_SHA224:
1857 case RTE_CRYPTO_AUTH_SHA384:
1858 case RTE_CRYPTO_AUTH_MD5:
1859 case RTE_CRYPTO_AUTH_AES_GMAC:
1860 case RTE_CRYPTO_AUTH_KASUMI_F9:
1861 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1862 case RTE_CRYPTO_AUTH_ZUC_EIA3:
1863 CAAM_JR_ERR("Crypto: Unsupported auth alg %u\n",
1867 CAAM_JR_ERR("Crypto: Undefined Auth specified %u\n",
1872 switch (cipher_xform->algo) {
1873 case RTE_CRYPTO_CIPHER_AES_CBC:
1874 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1876 case RTE_CRYPTO_CIPHER_3DES_CBC:
1877 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1879 case RTE_CRYPTO_CIPHER_AES_CTR:
1880 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1882 case RTE_CRYPTO_CIPHER_NULL:
1883 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1884 case RTE_CRYPTO_CIPHER_3DES_ECB:
1885 case RTE_CRYPTO_CIPHER_AES_ECB:
1886 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1887 CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u\n",
1888 cipher_xform->algo);
1891 CAAM_JR_ERR("Crypto: Undefined Cipher specified %u\n",
1892 cipher_xform->algo);
1896 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1897 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
1898 sizeof(session->ip4_hdr));
1899 session->ip4_hdr.ip_v = IPVERSION;
1900 session->ip4_hdr.ip_hl = 5;
1901 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
1902 sizeof(session->ip4_hdr));
1903 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
1904 session->ip4_hdr.ip_id = 0;
1905 session->ip4_hdr.ip_off = 0;
1906 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
1907 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
1908 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
1910 session->ip4_hdr.ip_sum = 0;
1911 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
1912 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
1913 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
1914 (void *)&session->ip4_hdr,
1917 session->encap_pdb.options =
1918 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
1919 PDBOPTS_ESP_OIHI_PDB_INL |
1921 PDBHMO_ESP_ENCAP_DTTL;
1922 session->encap_pdb.spi = ipsec_xform->spi;
1923 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
1925 session->dir = DIR_ENC;
1926 } else if (ipsec_xform->direction ==
1927 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1928 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
1929 session->decap_pdb.options = sizeof(struct ip) << 16;
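/*
 * Layout sketch for the decap PDB options word set above, assuming
 * the SEC convention that the outer header length to strip lives in
 * the upper half of the word:
 *
 *	options = (uint32_t)sizeof(struct ip) << 16;
 *
 * i.e. decapsulation removes a plain 20-byte IPv4 header without
 * options.
 */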
1930 session->dir = DIR_DEC;
1933 session->ctx_pool = internals->ctx_pool;
1937 rte_free(session->auth_key.data);
1938 rte_free(session->cipher_key.data);
1939 memset(session, 0, sizeof(struct caam_jr_session));
1944 caam_jr_security_session_create(void *dev,
1945 struct rte_security_session_conf *conf,
1946 struct rte_security_session *sess,
1947 struct rte_mempool *mempool)
1949 void *sess_private_data;
1950 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
1953 PMD_INIT_FUNC_TRACE();
1954 if (rte_mempool_get(mempool, &sess_private_data)) {
1955 CAAM_JR_ERR("Couldn't get object from session mempool");
1959 switch (conf->protocol) {
1960 case RTE_SECURITY_PROTOCOL_IPSEC:
1961 ret = caam_jr_set_ipsec_session(cdev, conf,
1964 case RTE_SECURITY_PROTOCOL_MACSEC:
1970 CAAM_JR_ERR("failed to configure session parameters");
1971 /* Return session to mempool */
1972 rte_mempool_put(mempool, sess_private_data);
1976 set_sec_session_private_data(sess, sess_private_data);
1981 /* Clear the memory of session so it doesn't leave key material behind */
1983 caam_jr_security_session_destroy(void *dev __rte_unused,
1984 struct rte_security_session *sess)
1986 PMD_INIT_FUNC_TRACE();
1987 void *sess_priv = get_sec_session_private_data(sess);
1989 struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1992 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1994 rte_free(s->cipher_key.data);
1995 rte_free(s->auth_key.data);
1996 memset(s, 0, sizeof(struct caam_jr_session));
1997 set_sec_session_private_data(sess, NULL);
1998 rte_mempool_put(sess_mp, sess_priv);
2005 caam_jr_dev_configure(struct rte_cryptodev *dev,
2006 struct rte_cryptodev_config *config __rte_unused)
2009 struct sec_job_ring_t *internals;
2011 PMD_INIT_FUNC_TRACE();
2013 internals = dev->data->dev_private;
2014 snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
2015 if (!internals->ctx_pool) {
2016 internals->ctx_pool = rte_mempool_create((const char *)str,
2018 sizeof(struct caam_jr_op_ctx),
2019 CTX_POOL_CACHE_SIZE, 0,
2020 NULL, NULL, NULL, NULL,
2022 if (!internals->ctx_pool) {
2023 CAAM_JR_ERR("%s create failed\n", str);
2027 CAAM_JR_INFO("mempool already created for dev_id : %d",
2034 caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
2036 PMD_INIT_FUNC_TRACE();
2041 caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
2043 PMD_INIT_FUNC_TRACE();
2047 caam_jr_dev_close(struct rte_cryptodev *dev)
2049 struct sec_job_ring_t *internals;
2051 PMD_INIT_FUNC_TRACE();
2056 internals = dev->data->dev_private;
2057 rte_mempool_free(internals->ctx_pool);
2058 internals->ctx_pool = NULL;
2064 caam_jr_dev_infos_get(struct rte_cryptodev *dev,
2065 struct rte_cryptodev_info *info)
2067 struct sec_job_ring_t *internals = dev->data->dev_private;
2069 PMD_INIT_FUNC_TRACE();
2071 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2072 info->feature_flags = dev->feature_flags;
2073 info->capabilities = caam_jr_get_cryptodev_capabilities();
2074 info->sym.max_nb_sessions = internals->max_nb_sessions;
2075 info->driver_id = cryptodev_driver_id;
2079 static struct rte_cryptodev_ops caam_jr_ops = {
2080 .dev_configure = caam_jr_dev_configure,
2081 .dev_start = caam_jr_dev_start,
2082 .dev_stop = caam_jr_dev_stop,
2083 .dev_close = caam_jr_dev_close,
2084 .dev_infos_get = caam_jr_dev_infos_get,
2085 .stats_get = caam_jr_stats_get,
2086 .stats_reset = caam_jr_stats_reset,
2087 .queue_pair_setup = caam_jr_queue_pair_setup,
2088 .queue_pair_release = caam_jr_queue_pair_release,
2089 .queue_pair_count = caam_jr_queue_pair_count,
2090 .sym_session_get_size = caam_jr_sym_session_get_size,
2091 .sym_session_configure = caam_jr_sym_session_configure,
2092 .sym_session_clear = caam_jr_sym_session_clear
2095 static struct rte_security_ops caam_jr_security_ops = {
2096 .session_create = caam_jr_security_session_create,
2097 .session_update = NULL,
2098 .session_stats_get = NULL,
2099 .session_destroy = caam_jr_security_session_destroy,
2100 .set_pkt_metadata = NULL,
2101 .capabilities_get = caam_jr_get_security_capabilities
2104 /* @brief Flush job rings of any processed descs.
2105 * The processed descs are silently dropped,
2106 * WITHOUT being notified to UA.
2109 close_job_ring(struct sec_job_ring_t *job_ring)
2111 PMD_INIT_FUNC_TRACE();
2112 if (job_ring->irq_fd) {
2113 /* Producer index is frozen. If consumer index is not equal
2114 * with producer index, then we have descs to flush.
2116 while (job_ring->pidx != job_ring->cidx)
2117 hw_flush_job_ring(job_ring, false, NULL);
2119 /* free the uio job ring */
2120 free_job_ring(job_ring->irq_fd);
2121 job_ring->irq_fd = 0;
2122 caam_jr_dma_free(job_ring->input_ring);
2123 caam_jr_dma_free(job_ring->output_ring);
2128 /** @brief Release the software and hardware resources tied to a job ring.
2129 * @param [in] job_ring The job ring
2131 * @retval 0 for success
2132 * @retval -1 for error
2135 shutdown_job_ring(struct sec_job_ring_t *job_ring)
2139 PMD_INIT_FUNC_TRACE();
2140 ASSERT(job_ring != NULL);
2141 ret = hw_shutdown_job_ring(job_ring);
2142 SEC_ASSERT(ret == 0, ret,
2143 "Failed to shutdown hardware job ring %p",
2146 if (job_ring->coalescing_en)
2147 hw_job_ring_disable_coalescing(job_ring);
2149 if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
2150 ret = caam_jr_disable_irqs(job_ring->irq_fd);
2151 SEC_ASSERT(ret == 0, ret,
2152 "Failed to disable irqs for job ring %p",
2160 * @brief Release the resources used by the SEC user space driver.
2162 * Reset and release SEC's job rings indicated by the User Application at
2163 * init_job_ring() and free any memory allocated internally.
2164 * Call once during application tear down.
2166 * @note In case there are any descriptors in-flight (descriptors received by
2167 * SEC driver for processing and for which no response was yet provided to UA),
2168 * the descriptors are discarded without any notifications to User Application.
2170 * @retval ::0 is returned for a successful execution
2171 * @retval ::-1 is returned if SEC driver release is in progress
2174 caam_jr_dev_uninit(struct rte_cryptodev *dev)
2176 struct sec_job_ring_t *internals;
2178 PMD_INIT_FUNC_TRACE();
2182 internals = dev->data->dev_private;
2183 rte_free(dev->security_ctx);
2185 /* If any descriptors are in flight, poll and wait
2186 * until all descriptors are received and silently discarded.
2189 shutdown_job_ring(internals);
2190 close_job_ring(internals);
2191 rte_mempool_free(internals->ctx_pool);
2194 CAAM_JR_INFO("Closing crypto device %s", dev->data->name);
2196 /* last caam jr instance */
2197 if (g_job_rings_no == 0)
2198 g_driver_state = SEC_DRIVER_STATE_IDLE;
2203 /* @brief Initialize the software and hardware resources tied to a job ring.
2204 * @param [in] jr_mode Mode to be used by SEC Driver to receive
2205 * notifications from SEC. Can be either
2206 * of the three: #SEC_NOTIFICATION_TYPE_NAPI
2207 * #SEC_NOTIFICATION_TYPE_IRQ or
2208 * #SEC_NOTIFICATION_TYPE_POLL
2209 * @param [in] NAPI_mode The NAPI work mode to configure a job ring at
2210 * startup. Used only when #SEC_NOTIFICATION_TYPE
2211 * is set to #SEC_NOTIFICATION_TYPE_NAPI.
2212 * @param [in] irq_coalescing_timer This value determines the maximum
2213 * amount of time after processing a
2214 * descriptor before raising an interrupt.
2215 * @param [in] irq_coalescing_count This value determines how many
2216 * descriptors are completed before
2217 * raising an interrupt.
2218 * @param [in] reg_base_addr The job ring registers base address
2219 * @param [in] irq_id The job ring interrupt identification number.
2220 * @retval job_ring_handle for successful job ring configuration
2221 * @retval NULL on error
2225 init_job_ring(void *reg_base_addr, uint32_t irq_id)
2227 struct sec_job_ring_t *job_ring = NULL;
2229 int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
2231 int irq_coalescing_timer = 0;
2232 int irq_coalescing_count = 0;
2234 for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
2235 if (g_job_rings[i].irq_fd == 0) {
2236 job_ring = &g_job_rings[i];
2241 if (job_ring == NULL) {
2242 CAAM_JR_ERR("No free job ring\n");
2246 job_ring->register_base_addr = reg_base_addr;
2247 job_ring->jr_mode = jr_mode;
2248 job_ring->napi_mode = 0;
2249 job_ring->irq_fd = irq_id;
2251 /* Allocate mem for input and output ring */
2253 /* Allocate memory for input ring */
2254 job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2255 SEC_DMA_MEM_INPUT_RING_SIZE);
2256 memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
2258 /* Allocate memory for output ring */
2259 job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2260 SEC_DMA_MEM_OUTPUT_RING_SIZE);
2261 memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
2263 /* Reset job ring in SEC hw and configure job ring registers */
2264 ret = hw_reset_job_ring(job_ring);
2266 CAAM_JR_ERR("Failed to reset hardware job ring");
2270 if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
2271 /* When SEC US driver works in NAPI mode, the UA can select
2272 * if the driver starts with IRQs on or off.
2274 if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
2275 CAAM_JR_INFO("Enabling DONE IRQ generationon job ring - %p",
2277 ret = caam_jr_enable_irqs(job_ring->irq_fd);
2279 CAAM_JR_ERR("Failed to enable irqs for job ring");
2283 } else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
2284 /* When SEC US driver works in pure interrupt mode,
2285 * IRQ's are always enabled.
2287 CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2289 ret = caam_jr_enable_irqs(job_ring->irq_fd);
2291 CAAM_JR_ERR("Failed to enable irqs for job ring");
2295 if (irq_coalescing_timer || irq_coalescing_count) {
2296 hw_job_ring_set_coalescing_param(job_ring,
2297 irq_coalescing_timer,
2298 irq_coalescing_count);
2300 hw_job_ring_enable_coalescing(job_ring);
2301 job_ring->coalescing_en = 1;
2304 job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
2305 job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
2306 job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
2310 caam_jr_dma_free(job_ring->output_ring);
2311 caam_jr_dma_free(job_ring->input_ring);
2317 caam_jr_dev_init(const char *name,
2318 struct rte_vdev_device *vdev,
2319 struct rte_cryptodev_pmd_init_params *init_params)
2321 struct rte_cryptodev *dev;
2322 struct rte_security_ctx *security_instance;
2323 struct uio_job_ring *job_ring;
2324 char str[RTE_CRYPTODEV_NAME_MAX_LEN];
2326 PMD_INIT_FUNC_TRACE();
2328 /* Validate driver state */
2329 if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
2330 g_job_rings_max = sec_configure();
2331 if (!g_job_rings_max) {
2332 CAAM_JR_ERR("No job ring detected on UIO !!!!");
2335 /* Update driver state */
2336 g_driver_state = SEC_DRIVER_STATE_STARTED;
2339 if (g_job_rings_no >= g_job_rings_max) {
2340 CAAM_JR_ERR("No more job rings available max=%d!!!!",
2345 job_ring = config_job_ring();
2346 if (job_ring == NULL) {
2347 CAAM_JR_ERR("failed to create job ring");
2351 snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
2353 dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
2355 CAAM_JR_ERR("failed to create cryptodev vdev");
2358 /* TODO: free it during teardown */
2359 dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
2362 if (!dev->data->dev_private) {
2363 CAAM_JR_ERR("Ring memory allocation failed\n");
2367 dev->driver_id = cryptodev_driver_id;
2368 dev->dev_ops = &caam_jr_ops;
2370 /* register rx/tx burst functions for data path */
2371 dev->dequeue_burst = caam_jr_dequeue_burst;
2372 dev->enqueue_burst = caam_jr_enqueue_burst;
2373 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2374 RTE_CRYPTODEV_FF_HW_ACCELERATED |
2375 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2376 RTE_CRYPTODEV_FF_SECURITY |
2377 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2378 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2379 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2380 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2381 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2383 /* For secondary processes, we don't initialise any further as primary
2384 * has already done this work. Only check we don't need a different
2387 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2388 CAAM_JR_WARN("Device already init by primary process");
2392 /* TODO: free it during teardown */
2393 security_instance = rte_malloc("caam_jr",
2394 sizeof(struct rte_security_ctx), 0);
2395 if (security_instance == NULL) {
2396 CAAM_JR_ERR("memory allocation failed\n");
2397 /* TODO: error handling */
2401 security_instance->device = (void *)dev;
2402 security_instance->ops = &caam_jr_security_ops;
2403 security_instance->sess_cnt = 0;
2404 dev->security_ctx = security_instance;
2406 RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name);
2411 caam_jr_dev_uninit(dev);
2412 rte_cryptodev_pmd_release_device(dev);
2414 free_job_ring(job_ring->uio_fd);
2416 CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
2422 /** Initialise CAAM JR crypto device */
2424 cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
2426 struct rte_cryptodev_pmd_init_params init_params = {
2428 sizeof(struct sec_job_ring_t),
2430 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
2433 const char *input_args;
2435 name = rte_vdev_device_name(vdev);
2439 input_args = rte_vdev_device_args(vdev);
2440 rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
2442 /* if sec device version is not configured */
2443 if (!rta_get_sec_era()) {
2444 const struct device_node *caam_node;
2446 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2447 const uint32_t *prop = of_get_property(caam_node,
2452 INTL_SEC_ERA(cpu_to_caam32(*prop)));
2457 #ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
2458 if (rta_get_sec_era() > RTA_SEC_ERA_8) {
2460 "CAAM is compiled in BE mode for device with sec era > 8???\n");
2465 return caam_jr_dev_init(name, vdev, &init_params);
2468 /** Uninitialise CAAM JR crypto device */
2470 cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
2472 struct rte_cryptodev *cryptodev;
2475 name = rte_vdev_device_name(vdev);
2479 cryptodev = rte_cryptodev_pmd_get_named_dev(name);
2480 if (cryptodev == NULL)
2483 caam_jr_dev_uninit(cryptodev);
2485 return rte_cryptodev_pmd_destroy(cryptodev);
2488 static struct rte_vdev_driver cryptodev_caam_jr_drv = {
2489 .probe = cryptodev_caam_jr_probe,
2490 .remove = cryptodev_caam_jr_remove
2493 static struct cryptodev_driver caam_jr_crypto_drv;
2495 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
2496 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
2497 "max_nb_queue_pairs=<int>"
2499 RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
2500 cryptodev_driver_id);
2502 RTE_INIT(caam_jr_init_log)
2504 caam_jr_logtype = rte_log_register("pmd.crypto.caam");
2505 if (caam_jr_logtype >= 0)
2506 rte_log_set_level(caam_jr_logtype, RTE_LOG_NOTICE);