crypto/caam_jr: add enqueue/dequeue operations
[dpdk.git] / drivers / crypto / caam_jr / caam_jr.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2017-2018 NXP
3  */
4
5 #include <fcntl.h>
6 #include <unistd.h>
7 #include <sched.h>
8 #include <net/if.h>
9
10 #include <rte_byteorder.h>
11 #include <rte_common.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_crypto.h>
14 #include <rte_cryptodev.h>
15 #include <rte_bus_vdev.h>
16 #include <rte_malloc.h>
17 #include <rte_security_driver.h>
18 #include <rte_hexdump.h>
19
20 #include <caam_jr_capabilities.h>
21 #include <caam_jr_config.h>
22 #include <caam_jr_hw_specific.h>
23 #include <caam_jr_pvt.h>
24 #include <caam_jr_desc.h>
25 #include <caam_jr_log.h>
26
27 /* RTA header files */
28 #include <hw/desc/common.h>
29 #include <hw/desc/algo.h>
30 #include <hw/desc/ipsec.h>
31 #include <of.h>
32
33 #define CAAM_JR_DBG     0
34 #define CRYPTODEV_NAME_CAAM_JR_PMD      crypto_caam_jr
35 static uint8_t cryptodev_driver_id;
36 int caam_jr_logtype;
37
38 enum rta_sec_era rta_sec_era;
39
40 /* Lists the states possible for the SEC user space driver. */
41 enum sec_driver_state_e {
42         SEC_DRIVER_STATE_IDLE,          /* Driver not initialized */
43         SEC_DRIVER_STATE_STARTED,       /* Driver initialized and can be used*/
44         SEC_DRIVER_STATE_RELEASE,       /* Driver release is in progress */
45 };
46
47 /* Job rings used for communication with SEC HW */
48 static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
49
50 /* The current state of SEC user space driver */
51 static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;
52
53 /* The number of job rings used by SEC user space driver */
54 static int g_job_rings_no;
55 static int g_job_rings_max;
56
57 struct sec_outring_entry {
58         phys_addr_t desc;       /* Pointer to completed descriptor */
59         uint32_t status;        /* Status for completed descriptor */
60 } __rte_packed;
61
62 /* virtual address conversin when mempool support is available for ctx */
63 static inline phys_addr_t
64 caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
65 {
66         PMD_INIT_FUNC_TRACE();
67         return (size_t)vaddr - ctx->vtop_offset;
68 }
69
70 static inline void
71 caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
72 {
73         PMD_INIT_FUNC_TRACE();
74         /* report op status to sym->op and then free the ctx memeory  */
75         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
76 }
77
78 static inline struct caam_jr_op_ctx *
79 caam_jr_alloc_ctx(struct caam_jr_session *ses)
80 {
81         struct caam_jr_op_ctx *ctx;
82         int ret;
83
84         PMD_INIT_FUNC_TRACE();
85         ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
86         if (!ctx || ret) {
87                 CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
88                 return NULL;
89         }
90         /*
91          * Clear SG memory. There are 16 SG entries of 16 Bytes each.
92          * one call to dcbz_64() clear 64 bytes, hence calling it 4 times
93          * to clear all the SG entries. caam_jr_alloc_ctx() is called for
94          * each packet, memset is costlier than dcbz_64().
95          */
96         dcbz_64(&ctx->sg[SG_CACHELINE_0]);
97         dcbz_64(&ctx->sg[SG_CACHELINE_1]);
98         dcbz_64(&ctx->sg[SG_CACHELINE_2]);
99         dcbz_64(&ctx->sg[SG_CACHELINE_3]);
100
101         ctx->ctx_pool = ses->ctx_pool;
102         ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
103
104         return ctx;
105 }
106
107 static inline int
108 is_cipher_only(struct caam_jr_session *ses)
109 {
110         PMD_INIT_FUNC_TRACE();
111         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
112                 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
113 }
114
115 static inline int
116 is_auth_only(struct caam_jr_session *ses)
117 {
118         PMD_INIT_FUNC_TRACE();
119         return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
120                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
121 }
122
123 static inline int
124 is_aead(struct caam_jr_session *ses)
125 {
126         PMD_INIT_FUNC_TRACE();
127         return ((ses->cipher_alg == 0) &&
128                 (ses->auth_alg == 0) &&
129                 (ses->aead_alg != 0));
130 }
131
132 static inline int
133 is_auth_cipher(struct caam_jr_session *ses)
134 {
135         PMD_INIT_FUNC_TRACE();
136         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
137                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
138 }
139
140 static inline int
141 is_encode(struct caam_jr_session *ses)
142 {
143         PMD_INIT_FUNC_TRACE();
144         return ses->dir == DIR_ENC;
145 }
146
147 static inline int
148 is_decode(struct caam_jr_session *ses)
149 {
150         PMD_INIT_FUNC_TRACE();
151         return ses->dir == DIR_DEC;
152 }
153
154 static inline void
155 caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
156 {
157         PMD_INIT_FUNC_TRACE();
158         switch (ses->auth_alg) {
159         case RTE_CRYPTO_AUTH_NULL:
160                 ses->digest_length = 0;
161                 break;
162         case RTE_CRYPTO_AUTH_MD5_HMAC:
163                 alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
164                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
165                 break;
166         case RTE_CRYPTO_AUTH_SHA1_HMAC:
167                 alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
168                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
169                 break;
170         case RTE_CRYPTO_AUTH_SHA224_HMAC:
171                 alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
172                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
173                 break;
174         case RTE_CRYPTO_AUTH_SHA256_HMAC:
175                 alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
176                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
177                 break;
178         case RTE_CRYPTO_AUTH_SHA384_HMAC:
179                 alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
180                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
181                 break;
182         case RTE_CRYPTO_AUTH_SHA512_HMAC:
183                 alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
184                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
185                 break;
186         default:
187                 CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
188         }
189 }
190
191 static inline void
192 caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
193 {
194         PMD_INIT_FUNC_TRACE();
195         switch (ses->cipher_alg) {
196         case RTE_CRYPTO_CIPHER_NULL:
197                 break;
198         case RTE_CRYPTO_CIPHER_AES_CBC:
199                 alginfo_c->algtype = OP_ALG_ALGSEL_AES;
200                 alginfo_c->algmode = OP_ALG_AAI_CBC;
201                 break;
202         case RTE_CRYPTO_CIPHER_3DES_CBC:
203                 alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
204                 alginfo_c->algmode = OP_ALG_AAI_CBC;
205                 break;
206         case RTE_CRYPTO_CIPHER_AES_CTR:
207                 alginfo_c->algtype = OP_ALG_ALGSEL_AES;
208                 alginfo_c->algmode = OP_ALG_AAI_CTR;
209                 break;
210         default:
211                 CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
212         }
213 }
214
215 static inline void
216 caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
217 {
218         PMD_INIT_FUNC_TRACE();
219         switch (ses->aead_alg) {
220         case RTE_CRYPTO_AEAD_AES_GCM:
221                 alginfo->algtype = OP_ALG_ALGSEL_AES;
222                 alginfo->algmode = OP_ALG_AAI_GCM;
223                 break;
224         default:
225                 CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
226         }
227 }
228
229 /* prepare command block of the session */
230 static int
231 caam_jr_prep_cdb(struct caam_jr_session *ses)
232 {
233         struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
234         int32_t shared_desc_len = 0;
235         struct sec_cdb *cdb;
236         int err;
237 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
238         int swap = false;
239 #else
240         int swap = true;
241 #endif
242
243         PMD_INIT_FUNC_TRACE();
244         if (ses->cdb)
245                 caam_jr_dma_free(ses->cdb);
246
247         cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
248         if (!cdb) {
249                 CAAM_JR_ERR("failed to allocate memory for cdb\n");
250                 return -1;
251         }
252
253         ses->cdb = cdb;
254
255         memset(cdb, 0, sizeof(struct sec_cdb));
256
257         if (is_cipher_only(ses)) {
258                 caam_cipher_alg(ses, &alginfo_c);
259                 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
260                         CAAM_JR_ERR("not supported cipher alg");
261                         rte_free(cdb);
262                         return -ENOTSUP;
263                 }
264
265                 alginfo_c.key = (size_t)ses->cipher_key.data;
266                 alginfo_c.keylen = ses->cipher_key.length;
267                 alginfo_c.key_enc_flags = 0;
268                 alginfo_c.key_type = RTA_DATA_IMM;
269
270                 shared_desc_len = cnstr_shdsc_blkcipher(
271                                                 cdb->sh_desc, true,
272                                                 swap, &alginfo_c,
273                                                 NULL,
274                                                 ses->iv.length,
275                                                 ses->dir);
276         } else if (is_auth_only(ses)) {
277                 caam_auth_alg(ses, &alginfo_a);
278                 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
279                         CAAM_JR_ERR("not supported auth alg");
280                         rte_free(cdb);
281                         return -ENOTSUP;
282                 }
283
284                 alginfo_a.key = (size_t)ses->auth_key.data;
285                 alginfo_a.keylen = ses->auth_key.length;
286                 alginfo_a.key_enc_flags = 0;
287                 alginfo_a.key_type = RTA_DATA_IMM;
288
289                 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
290                                                    swap, &alginfo_a,
291                                                    !ses->dir,
292                                                    ses->digest_length);
293         } else if (is_aead(ses)) {
294                 caam_aead_alg(ses, &alginfo);
295                 if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
296                         CAAM_JR_ERR("not supported aead alg");
297                         rte_free(cdb);
298                         return -ENOTSUP;
299                 }
300                 alginfo.key = (size_t)ses->aead_key.data;
301                 alginfo.keylen = ses->aead_key.length;
302                 alginfo.key_enc_flags = 0;
303                 alginfo.key_type = RTA_DATA_IMM;
304
305                 if (ses->dir == DIR_ENC)
306                         shared_desc_len = cnstr_shdsc_gcm_encap(
307                                         cdb->sh_desc, true, swap,
308                                         &alginfo,
309                                         ses->iv.length,
310                                         ses->digest_length);
311                 else
312                         shared_desc_len = cnstr_shdsc_gcm_decap(
313                                         cdb->sh_desc, true, swap,
314                                         &alginfo,
315                                         ses->iv.length,
316                                         ses->digest_length);
317         } else {
318                 caam_cipher_alg(ses, &alginfo_c);
319                 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
320                         CAAM_JR_ERR("not supported cipher alg");
321                         rte_free(cdb);
322                         return -ENOTSUP;
323                 }
324
325                 alginfo_c.key = (size_t)ses->cipher_key.data;
326                 alginfo_c.keylen = ses->cipher_key.length;
327                 alginfo_c.key_enc_flags = 0;
328                 alginfo_c.key_type = RTA_DATA_IMM;
329
330                 caam_auth_alg(ses, &alginfo_a);
331                 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
332                         CAAM_JR_ERR("not supported auth alg");
333                         rte_free(cdb);
334                         return -ENOTSUP;
335                 }
336
337                 alginfo_a.key = (size_t)ses->auth_key.data;
338                 alginfo_a.keylen = ses->auth_key.length;
339                 alginfo_a.key_enc_flags = 0;
340                 alginfo_a.key_type = RTA_DATA_IMM;
341
342                 cdb->sh_desc[0] = alginfo_c.keylen;
343                 cdb->sh_desc[1] = alginfo_a.keylen;
344                 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
345                                        MIN_JOB_DESC_SIZE,
346                                        (unsigned int *)cdb->sh_desc,
347                                        &cdb->sh_desc[2], 2);
348
349                 if (err < 0) {
350                         CAAM_JR_ERR("Crypto: Incorrect key lengths");
351                         rte_free(cdb);
352                         return err;
353                 }
354                 if (cdb->sh_desc[2] & 1)
355                         alginfo_c.key_type = RTA_DATA_IMM;
356                 else {
357                         alginfo_c.key = (size_t)caam_jr_mem_vtop(
358                                                 (void *)(size_t)alginfo_c.key);
359                         alginfo_c.key_type = RTA_DATA_PTR;
360                 }
361                 if (cdb->sh_desc[2] & (1<<1))
362                         alginfo_a.key_type = RTA_DATA_IMM;
363                 else {
364                         alginfo_a.key = (size_t)caam_jr_mem_vtop(
365                                                 (void *)(size_t)alginfo_a.key);
366                         alginfo_a.key_type = RTA_DATA_PTR;
367                 }
368                 cdb->sh_desc[0] = 0;
369                 cdb->sh_desc[1] = 0;
370                 cdb->sh_desc[2] = 0;
371                         /* Auth_only_len is set as 0 here and it will be
372                          * overwritten in fd for each packet.
373                          */
374                         shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
375                                         true, swap, &alginfo_c, &alginfo_a,
376                                         ses->iv.length, 0,
377                                         ses->digest_length, ses->dir);
378         }
379
380         if (shared_desc_len < 0) {
381                 CAAM_JR_ERR("error in preparing command block");
382                 return shared_desc_len;
383         }
384
385 #if CAAM_JR_DBG
386         SEC_DUMP_DESC(cdb->sh_desc);
387 #endif
388
389         cdb->sh_hdr.hi.field.idlen = shared_desc_len;
390
391         return 0;
392 }
393
394 /* @brief Poll the HW for already processed jobs in the JR
395  * and silently discard the available jobs or notify them to UA
396  * with indicated error code.
397  *
398  * @param [in,out]  job_ring        The job ring to poll.
399  * @param [in]  do_notify           Can be #TRUE or #FALSE. Indicates if
400  *                                  descriptors are to be discarded
401  *                                  or notified to UA with given error_code.
402  * @param [out] notified_descs    Number of notified descriptors. Can be NULL
403  *                                      if do_notify is #FALSE
404  */
405 static void
406 hw_flush_job_ring(struct sec_job_ring_t *job_ring,
407                   uint32_t do_notify,
408                   uint32_t *notified_descs)
409 {
410         int32_t jobs_no_to_discard = 0;
411         int32_t discarded_descs_no = 0;
412
413         PMD_INIT_FUNC_TRACE();
414         CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
415                 job_ring, job_ring->pidx, job_ring->cidx, do_notify);
416
417         jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);
418
419         /* Discard all jobs */
420         CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
421                   job_ring, job_ring->pidx, job_ring->cidx,
422                   jobs_no_to_discard);
423
424         while (jobs_no_to_discard > discarded_descs_no) {
425                 discarded_descs_no++;
426                 /* Now increment the consumer index for the current job ring,
427                  * AFTER saving job in temporary location!
428                  * Increment the consumer index for the current job ring
429                  */
430                 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
431                                          SEC_JOB_RING_SIZE);
432
433                 hw_remove_entries(job_ring, 1);
434         }
435
436         if (do_notify == true) {
437                 ASSERT(notified_descs != NULL);
438                 *notified_descs = discarded_descs_no;
439         }
440 }
441
442 /* @brief Poll the HW for already processed jobs in the JR
443  * and notify the available jobs to UA.
444  *
445  * @param [in]  job_ring        The job ring to poll.
446  * @param [in]  limit           The maximum number of jobs to notify.
447  *                              If set to negative value, all available jobs are
448  *                              notified.
449  *
450  * @retval >=0 for No of jobs notified to UA.
451  * @retval -1 for error
452  */
static int
hw_poll_job_ring(struct sec_job_ring_t *job_ring,
		 struct rte_crypto_op **ops, int32_t limit,
		 struct caam_jr_qp *jr_qp)
{
	int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify*/
	int32_t number_of_jobs_available = 0;
	int32_t notified_descs_no = 0;
	uint32_t sec_error_code = 0;
	struct job_descriptor *current_desc;
	phys_addr_t current_desc_addr;
	phys_addr_t *temp_addr;
	struct caam_jr_op_ctx *ctx;

	PMD_INIT_FUNC_TRACE();
	/* TODO check for ops have memory*/
	/* check here if any JR error that cannot be written
	 * in the output status word has occurred
	 */
	if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
		CAAM_JR_INFO("err received");
		sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
					GET_JR_REG(JRINT, job_ring));
		if (unlikely(sec_error_code)) {
			/* Fatal ring-level error: report it and bail out */
			hw_job_ring_error_print(job_ring, sec_error_code);
			return -1;
		}
	}
	/* compute the number of jobs available in the job ring based on the
	 * producer and consumer index values.
	 */
	number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
	/* Compute the number of notifications that need to be raised to UA
	 * If limit > total number of done jobs -> notify all done jobs
	 * If limit = 0 -> error
	 * If limit < total number of done jobs -> notify a number
	 * of done jobs equal with limit
	 */
	jobs_no_to_notify = (limit > number_of_jobs_available) ?
				number_of_jobs_available : limit;
	CAAM_JR_DP_DEBUG(
		"Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
		job_ring, job_ring->pidx, job_ring->cidx,
		limit, number_of_jobs_available, jobs_no_to_notify);

	/* Make sure the output-ring entries are read only after the
	 * finished-jobs count observed above.
	 */
	rte_smp_rmb();

	while (jobs_no_to_notify > notified_descs_no) {
		/* NOTE(review): function-static counters are shared by all
		 * queues/lcores with no synchronization — confirm polling
		 * is single-threaded per ring.
		 */
		static uint64_t false_alarm;
		static uint64_t real_poll;

		/* Get job status here */
		sec_error_code = job_ring->output_ring[job_ring->cidx].status;
		/* Get completed descriptor */
		temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
		current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);

		real_poll++;
		/* todo check if it is false alarm no desc present */
		if (!current_desc_addr) {
			false_alarm++;
			printf("false alarm %" PRIu64 "real %" PRIu64
				" sec_err =0x%x cidx Index =0%d\n",
				false_alarm, real_poll,
				sec_error_code, job_ring->cidx);
			/* NOTE(review): rte_panic() aborts the whole
			 * process, making the return below unreachable —
			 * confirm a graceful error path isn't preferable.
			 */
			rte_panic("CAAM JR descriptor NULL");
			return notified_descs_no;
		}
		current_desc = (struct job_descriptor *)
				caam_jr_dma_ptov(current_desc_addr);
		/* now increment the consumer index for the current job ring,
		 * AFTER saving job in temporary location!
		 */
		job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
				 SEC_JOB_RING_SIZE);
		/* Signal that the job has been processed and the slot is free*/
		hw_remove_entries(job_ring, 1);
		/*TODO for multiple ops, packets*/
		/* Recover the enqueue-time context that embeds this
		 * job descriptor.
		 */
		ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
		if (unlikely(sec_error_code)) {
			CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n",
				job_ring->cidx, sec_error_code);
			hw_handle_job_ring_error(job_ring, sec_error_code);
			//todo improve with exact errors
			ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			jr_qp->rx_errs++;
		} else {
			ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
#if CAAM_JR_DBG
			if (ctx->op->sym->m_dst) {
				rte_hexdump(stdout, "PROCESSED",
				rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
				rte_pktmbuf_data_len(ctx->op->sym->m_dst));
			} else {
				rte_hexdump(stdout, "PROCESSED",
				rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
				rte_pktmbuf_data_len(ctx->op->sym->m_src));
			}
#endif
		}
		/* For protocol (IPsec) ops the engine rewrote the packet;
		 * refresh mbuf lengths from the resulting IP header.
		 * NOTE(review): assumes an IPv4 header at mbuf start —
		 * see the TODO below; confirm for IPv6/other protocols.
		 */
		if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct ip *ip4_hdr;

			if (ctx->op->sym->m_dst) {
				/*TODO check for ip header or other*/
				ip4_hdr = (struct ip *)
				rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*);
				ctx->op->sym->m_dst->pkt_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
				ctx->op->sym->m_dst->data_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
			} else {
				ip4_hdr = (struct ip *)
				rte_pktmbuf_mtod(ctx->op->sym->m_src, char*);
				ctx->op->sym->m_src->pkt_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
				ctx->op->sym->m_src->data_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
			}
		}
		/* Hand the op back to the caller and recycle the context */
		*ops = ctx->op;
		caam_jr_op_ending(ctx);
		ops++;
		notified_descs_no++;
	}
	return notified_descs_no;
}
580
581 static uint16_t
582 caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
583                        uint16_t nb_ops)
584 {
585         struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
586         struct sec_job_ring_t *ring = jr_qp->ring;
587         int num_rx;
588         int ret;
589
590         PMD_INIT_FUNC_TRACE();
591         CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);
592
593         /* Poll job ring
594          * If nb_ops < 0 -> poll JR until no more notifications are available.
595          * If nb_ops > 0 -> poll JR until limit is reached.
596          */
597
598         /* Run hw poll job ring */
599         num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
600         if (num_rx < 0) {
601                 CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
602                 return 0;
603         }
604
605         CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);
606
607         if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
608                 if (num_rx < nb_ops) {
609                         ret = caam_jr_enable_irqs(ring->irq_fd);
610                         SEC_ASSERT(ret == 0, ret,
611                         "Failed to enable irqs for job ring %p", ring);
612                 }
613         } else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
614
615                 /* Always enable IRQ generation when in pure IRQ mode */
616                 ret = caam_jr_enable_irqs(ring->irq_fd);
617                 SEC_ASSERT(ret == 0, ret,
618                         "Failed to enable irqs for job ring %p", ring);
619         }
620
621         jr_qp->rx_pkts += num_rx;
622
623         return num_rx;
624 }
625
/* Build a job descriptor for an authentication-only operation.
 *
 * @param op    Crypto operation to process (data taken from m_src).
 * @param ses   Session holding the prepared shared descriptor (cdb).
 * @return per-op context carrying the job descriptor, or NULL when
 *         the context pool is exhausted.
 */
static inline struct caam_jr_op_ctx *
build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t start_addr;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	struct sec_job_descriptor_t *jobdescr;

	PMD_INIT_FUNC_TRACE();
	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;

	cdb = ses->cdb;
	/* byte offset of the shared descriptor inside the cdb struct */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	start_addr = rte_pktmbuf_iova(sym->m_src);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* output */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
			0, ses->digest_length);

	/*input */
	if (is_decode(ses)) {
		/* Verification: feed data followed by the expected digest
		 * via a two-entry scatter-gather list so the engine can
		 * check the ICV itself.
		 */
		sg = &ctx->sg[0];
		SEC_JD_SET_IN_PTR(jobdescr,
			(uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
			(sym->auth.data.length + ses->digest_length));
		/* enabling sg list */
		(jobdescr)->seq_in.command.word  |= 0x01000000;

		/* hash result or digest, save digest first */
		rte_memcpy(ctx->digest, sym->auth.digest.data,
			   ses->digest_length);
		sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);

#if CAAM_JR_DBG
		rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
#endif
		/* let's check digest by hw */
		sg++;
		sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
		/* last element*/
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	} else {
		/* Generation: contiguous input; digest is written to the
		 * output pointer set above.
		 */
		SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
			sym->auth.data.offset, sym->auth.data.length);
	}
	return ctx;
}
690
/* Build a job descriptor for a cipher-only operation.
 *
 * Input is a two-entry scatter-gather list (IV followed by payload);
 * output goes to m_dst, or in place to m_src when m_dst is NULL.
 *
 * @param op    Crypto operation to process.
 * @param ses   Session holding the prepared shared descriptor (cdb).
 * @return per-op context, or NULL when the context pool is exhausted.
 */
static inline struct caam_jr_op_ctx *
build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	/* IV lives in the crypto op's private area at ses->iv.offset */
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;

	PMD_INIT_FUNC_TRACE();
	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	cdb = ses->cdb;
	/* byte offset of the shared descriptor inside the cdb struct */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	/* operate in place when no separate destination mbuf is given */
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

#if CAAM_JR_DBG
	CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
			sym->m_src->data_off, sym->cipher.data.offset,
			sym->cipher.data.length, ses->iv.length);
#endif
	/* output */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
			sym->cipher.data.offset,
			sym->cipher.data.length + ses->iv.length);

	/*input */
	sg = &ctx->sg[0];
	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
				sym->cipher.data.length + ses->iv.length);
	/*enabling sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	/* first SG entry: the IV */
	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
	sg->len = cpu_to_caam32(ses->iv.length);

	/* second SG entry: the payload to be ciphered */
	sg = &ctx->sg[1];
	sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
	sg->len = cpu_to_caam32(sym->cipher.data.length);
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	return ctx;
}
754
/* Build a SEC job descriptor for a chained cipher+auth operation on a
 * contiguous mbuf. The input side is always a 3-entry (max) scatter-gather
 * list starting at ctx->sg[0]: IV, then the auth region, and - for decode
 * only - the received digest copied into ctx->digest for in-HW verification.
 * The output side is a separate SG list at ctx->sg[6]: the cipher region,
 * plus the generated digest for encode.
 *
 * Returns the per-op context holding the descriptor, or NULL if no context
 * could be allocated from the session's pool.
 */
static inline struct caam_jr_op_ctx *
build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;
	uint32_t auth_only_len;

	PMD_INIT_FUNC_TRACE();
	/* Bytes that are authenticated but not ciphered (e.g. preceding
	 * header); assumes auth region covers the cipher region -
	 * TODO confirm against capability checks at session setup.
	 */
	auth_only_len = op->sym->auth.data.length -
				op->sym->cipher.data.length;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	/* In-place operation when no distinct destination mbuf is given */
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	cdb = ses->cdb;
	/* Offset of the shared descriptor inside the CDB block */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* input */
	sg = &ctx->sg[0];
	if (is_encode(ses)) {
		/* encode: IV followed by the region to authenticate */
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
		sg->len = cpu_to_caam32(ses->iv.length);
		length += ses->iv.length;

		sg++;
		sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);
		length += sym->auth.data.length;
		/* last element*/
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	} else {
		/* decode: IV, auth region, then the expected digest */
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
		sg->len = cpu_to_caam32(ses->iv.length);
		length += ses->iv.length;

		sg++;
		sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);
		length += sym->auth.data.length;

		/* Digest is copied to DMA-able per-op storage so SEC can
		 * read it for verification.
		 */
		rte_memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
		length += ses->digest_length;
		/* last element*/
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	}

	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
				length);
	/* set sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	/* output - separate SG list, kept at fixed slot 6 so it never
	 * overlaps the (at most 3-entry) input list above
	 */
	sg = &ctx->sg[6];

	sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
	sg->len = cpu_to_caam32(sym->cipher.data.length);
	length = sym->cipher.data.length;

	if (is_encode(ses)) {
		/* set auth output */
		sg++;
		sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
		sg->len = cpu_to_caam32(ses->digest_length);
		length += ses->digest_length;
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_OUT_PTR(jobdescr,
			   (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
	/* set sg bit */
	(jobdescr)->seq_out.command.word  |= 0x01000000;

	/* Auth_only_len is set as 0 in descriptor and it is
	 * overwritten here in the jd which will update
	 * the DPOVRD reg.
	 */
	if (auth_only_len)
		/* bit 31 enables the DPOVRD override in hardware */
		(jobdescr)->dpovrd = 0x80000000 | auth_only_len;

	return ctx;
}
/* Enqueue a single crypto op on the job ring behind @qp.
 *
 * Resolves the session, lazily (re)builds the shared descriptor when the op
 * migrates to a new queue pair, dispatches to the per-algorithm descriptor
 * builder, byte-swaps the job descriptor when core and CAAM endianness
 * differ, and finally publishes the descriptor to the hardware input ring.
 *
 * Returns 0 on success, -EBUSY when the ring is full, -1 on unsupported or
 * sessionless ops (error counters on @qp are updated accordingly).
 */
static int
caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
{
	struct sec_job_ring_t *ring = qp->ring;
	struct caam_jr_session *ses;
	struct caam_jr_op_ctx *ctx = NULL;
	struct sec_job_descriptor_t *jobdescr __rte_unused;

	PMD_INIT_FUNC_TRACE();
	switch (op->sess_type) {
	case RTE_CRYPTO_OP_WITH_SESSION:
		ses = (struct caam_jr_session *)
		get_sym_session_private_data(op->sym->session,
					cryptodev_driver_id);
		break;
	default:
		CAAM_JR_DP_ERR("sessionless crypto op not supported");
		qp->tx_errs++;
		return -1;
	}

	/* First use on this qp (or qp changed): rebuild the CDB so the
	 * shared descriptor matches this session/ring combination.
	 */
	if (unlikely(!ses->qp || ses->qp != qp)) {
		CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp);
		ses->qp = qp;
		caam_jr_prep_cdb(ses);
	}

	/* Only contiguous mbufs are supported; AEAD is not implemented yet,
	 * so it deliberately leaves ctx == NULL and fails below.
	 */
	if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
		if (is_auth_cipher(ses))
			ctx = build_cipher_auth(op, ses);
		else if (is_aead(ses))
			goto err1;
		else if (is_auth_only(ses))
			ctx = build_auth_only(op, ses);
		else if (is_cipher_only(ses))
			ctx = build_cipher_only(op, ses);
	} else {
		if (is_aead(ses))
			goto err1;
	}
err1:
	/* NULL ctx covers builder failure and every unsupported case above */
	if (unlikely(!ctx)) {
		qp->tx_errs++;
		CAAM_JR_ERR("not supported sec op");
		return -1;
	}
#if CAAM_JR_DBG
	if (is_decode(ses))
		rte_hexdump(stdout, "DECODE",
			rte_pktmbuf_mtod(op->sym->m_src, void *),
			rte_pktmbuf_data_len(op->sym->m_src));
	else
		rte_hexdump(stdout, "ENCODE",
			rte_pktmbuf_mtod(op->sym->m_src, void *),
			rte_pktmbuf_data_len(op->sym->m_src));

	printf("\n JD before conversion\n");
	for (int i = 0; i < 12; i++)
		printf("\n 0x%08x", ctx->jobdes.desc[i]);
#endif

	CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
		      ring, ring->pidx, ring->cidx);

	/* todo - do we want to retry */
	if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
			 SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
		CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
			      ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
		caam_jr_op_ending(ctx);
		qp->tx_ring_full++;
		return -EBUSY;
	}

#if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
	/* CAAM consumes the descriptor in its own endianness; swap every
	 * word/pointer field in place before handing it to hardware.
	 */
	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	jobdescr->deschdr.command.word =
		cpu_to_caam32(jobdescr->deschdr.command.word);
	jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
	jobdescr->seq_out.command.word =
		cpu_to_caam32(jobdescr->seq_out.command.word);
	jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
	jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
	jobdescr->seq_in.command.word =
		cpu_to_caam32(jobdescr->seq_in.command.word);
	jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
	jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
	jobdescr->load_dpovrd.command.word =
		cpu_to_caam32(jobdescr->load_dpovrd.command.word);
	jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
#endif

	/* Set ptr in input ring to current descriptor  */
	sec_write_addr(&ring->input_ring[ring->pidx],
			(phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
	/* Descriptor must be visible in memory before the doorbell ring */
	rte_smp_wmb();

	/* Notify HW that a new job is enqueued */
	hw_enqueue_desc_on_job_ring(ring);

	/* increment the producer index for the current job ring */
	ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);

	return 0;
}
971
972 static uint16_t
973 caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
974                        uint16_t nb_ops)
975 {
976         /* Function to transmit the frames to given device and queuepair */
977         uint32_t loop;
978         int32_t ret;
979         struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
980         uint16_t num_tx = 0;
981
982         PMD_INIT_FUNC_TRACE();
983         /*Prepare each packet which is to be sent*/
984         for (loop = 0; loop < nb_ops; loop++) {
985                 ret = caam_jr_enqueue_op(ops[loop], jr_qp);
986                 if (!ret)
987                         num_tx++;
988         }
989
990         jr_qp->tx_pkts += num_tx;
991
992         return num_tx;
993 }
994
995 /* Release queue pair */
996 static int
997 caam_jr_queue_pair_release(struct rte_cryptodev *dev,
998                            uint16_t qp_id)
999 {
1000         struct sec_job_ring_t *internals;
1001         struct caam_jr_qp *qp = NULL;
1002
1003         PMD_INIT_FUNC_TRACE();
1004         CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);
1005
1006         internals = dev->data->dev_private;
1007         if (qp_id >= internals->max_nb_queue_pairs) {
1008                 CAAM_JR_ERR("Max supported qpid %d",
1009                              internals->max_nb_queue_pairs);
1010                 return -EINVAL;
1011         }
1012
1013         qp = &internals->qps[qp_id];
1014         qp->ring = NULL;
1015         dev->data->queue_pairs[qp_id] = NULL;
1016
1017         return 0;
1018 }
1019
1020 /* Setup a queue pair */
1021 static int
1022 caam_jr_queue_pair_setup(
1023                 struct rte_cryptodev *dev, uint16_t qp_id,
1024                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1025                 __rte_unused int socket_id,
1026                 __rte_unused struct rte_mempool *session_pool)
1027 {
1028         struct sec_job_ring_t *internals;
1029         struct caam_jr_qp *qp = NULL;
1030
1031         PMD_INIT_FUNC_TRACE();
1032         CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1033
1034         internals = dev->data->dev_private;
1035         if (qp_id >= internals->max_nb_queue_pairs) {
1036                 CAAM_JR_ERR("Max supported qpid %d",
1037                              internals->max_nb_queue_pairs);
1038                 return -EINVAL;
1039         }
1040
1041         qp = &internals->qps[qp_id];
1042         qp->ring = internals;
1043         dev->data->queue_pairs[qp_id] = qp;
1044
1045         return 0;
1046 }
1047
1048 /* Return the number of allocated queue pairs */
1049 static uint32_t
1050 caam_jr_queue_pair_count(struct rte_cryptodev *dev)
1051 {
1052         PMD_INIT_FUNC_TRACE();
1053
1054         return dev->data->nb_queue_pairs;
1055 }
1056
/* Returns the size of the caam_jr session structure (the per-driver
 * private data the framework must reserve per session).
 */
static unsigned int
caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(struct caam_jr_session);
}
1065
1066 static int
1067 caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
1068                     struct rte_crypto_sym_xform *xform,
1069                     struct caam_jr_session *session)
1070 {
1071         PMD_INIT_FUNC_TRACE();
1072         session->cipher_alg = xform->cipher.algo;
1073         session->iv.length = xform->cipher.iv.length;
1074         session->iv.offset = xform->cipher.iv.offset;
1075         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1076                                                RTE_CACHE_LINE_SIZE);
1077         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1078                 CAAM_JR_ERR("No Memory for cipher key\n");
1079                 return -ENOMEM;
1080         }
1081         session->cipher_key.length = xform->cipher.key.length;
1082
1083         memcpy(session->cipher_key.data, xform->cipher.key.data,
1084                xform->cipher.key.length);
1085         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1086                         DIR_ENC : DIR_DEC;
1087
1088         return 0;
1089 }
1090
1091 static int
1092 caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
1093                   struct rte_crypto_sym_xform *xform,
1094                   struct caam_jr_session *session)
1095 {
1096         PMD_INIT_FUNC_TRACE();
1097         session->auth_alg = xform->auth.algo;
1098         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1099                                              RTE_CACHE_LINE_SIZE);
1100         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1101                 CAAM_JR_ERR("No Memory for auth key\n");
1102                 return -ENOMEM;
1103         }
1104         session->auth_key.length = xform->auth.key.length;
1105         session->digest_length = xform->auth.digest_length;
1106
1107         memcpy(session->auth_key.data, xform->auth.key.data,
1108                xform->auth.key.length);
1109         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1110                         DIR_ENC : DIR_DEC;
1111
1112         return 0;
1113 }
1114
1115 static int
1116 caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
1117                   struct rte_crypto_sym_xform *xform,
1118                   struct caam_jr_session *session)
1119 {
1120         PMD_INIT_FUNC_TRACE();
1121         session->aead_alg = xform->aead.algo;
1122         session->iv.length = xform->aead.iv.length;
1123         session->iv.offset = xform->aead.iv.offset;
1124         session->auth_only_len = xform->aead.aad_length;
1125         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1126                                              RTE_CACHE_LINE_SIZE);
1127         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1128                 CAAM_JR_ERR("No Memory for aead key\n");
1129                 return -ENOMEM;
1130         }
1131         session->aead_key.length = xform->aead.key.length;
1132         session->digest_length = xform->aead.digest_length;
1133
1134         memcpy(session->aead_key.data, xform->aead.key.data,
1135                xform->aead.key.length);
1136         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1137                         DIR_ENC : DIR_DEC;
1138
1139         return 0;
1140 }
1141
1142 static int
1143 caam_jr_set_session_parameters(struct rte_cryptodev *dev,
1144                                struct rte_crypto_sym_xform *xform, void *sess)
1145 {
1146         struct sec_job_ring_t *internals = dev->data->dev_private;
1147         struct caam_jr_session *session = sess;
1148
1149         PMD_INIT_FUNC_TRACE();
1150
1151         if (unlikely(sess == NULL)) {
1152                 CAAM_JR_ERR("invalid session struct");
1153                 return -EINVAL;
1154         }
1155
1156         /* Default IV length = 0 */
1157         session->iv.length = 0;
1158
1159         /* Cipher Only */
1160         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1161                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1162                 caam_jr_cipher_init(dev, xform, session);
1163
1164         /* Authentication Only */
1165         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1166                    xform->next == NULL) {
1167                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1168                 caam_jr_auth_init(dev, xform, session);
1169
1170         /* Cipher then Authenticate */
1171         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1172                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1173                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1174                         caam_jr_cipher_init(dev, xform, session);
1175                         caam_jr_auth_init(dev, xform->next, session);
1176                 } else {
1177                         CAAM_JR_ERR("Not supported: Auth then Cipher");
1178                         goto err1;
1179                 }
1180
1181         /* Authenticate then Cipher */
1182         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1183                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1184                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1185                         caam_jr_auth_init(dev, xform, session);
1186                         caam_jr_cipher_init(dev, xform->next, session);
1187                 } else {
1188                         CAAM_JR_ERR("Not supported: Auth then Cipher");
1189                         goto err1;
1190                 }
1191
1192         /* AEAD operation for AES-GCM kind of Algorithms */
1193         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1194                    xform->next == NULL) {
1195                 caam_jr_aead_init(dev, xform, session);
1196
1197         } else {
1198                 CAAM_JR_ERR("Invalid crypto type");
1199                 return -EINVAL;
1200         }
1201         session->ctx_pool = internals->ctx_pool;
1202
1203         return 0;
1204
1205 err1:
1206         rte_free(session->cipher_key.data);
1207         rte_free(session->auth_key.data);
1208         memset(session, 0, sizeof(struct caam_jr_session));
1209
1210         return -EINVAL;
1211 }
1212
1213 static int
1214 caam_jr_sym_session_configure(struct rte_cryptodev *dev,
1215                               struct rte_crypto_sym_xform *xform,
1216                               struct rte_cryptodev_sym_session *sess,
1217                               struct rte_mempool *mempool)
1218 {
1219         void *sess_private_data;
1220         int ret;
1221
1222         PMD_INIT_FUNC_TRACE();
1223
1224         if (rte_mempool_get(mempool, &sess_private_data)) {
1225                 CAAM_JR_ERR("Couldn't get object from session mempool");
1226                 return -ENOMEM;
1227         }
1228
1229         memset(sess_private_data, 0, sizeof(struct caam_jr_session));
1230         ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
1231         if (ret != 0) {
1232                 CAAM_JR_ERR("failed to configure session parameters");
1233                 /* Return session to mempool */
1234                 rte_mempool_put(mempool, sess_private_data);
1235                 return ret;
1236         }
1237
1238         set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
1239
1240         return 0;
1241 }
1242
1243 /* Clear the memory of session so it doesn't leave key material behind */
1244 static void
1245 caam_jr_sym_session_clear(struct rte_cryptodev *dev,
1246                 struct rte_cryptodev_sym_session *sess)
1247 {
1248         uint8_t index = dev->driver_id;
1249         void *sess_priv = get_sym_session_private_data(sess, index);
1250         struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1251
1252         PMD_INIT_FUNC_TRACE();
1253
1254         if (sess_priv) {
1255                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1256
1257                 rte_free(s->cipher_key.data);
1258                 rte_free(s->auth_key.data);
1259                 memset(s, 0, sizeof(struct caam_jr_session));
1260                 set_sym_session_private_data(sess, index, NULL);
1261                 rte_mempool_put(sess_mp, sess_priv);
1262         }
1263 }
1264
1265 static int
1266 caam_jr_dev_configure(struct rte_cryptodev *dev,
1267                        struct rte_cryptodev_config *config __rte_unused)
1268 {
1269         char str[20];
1270         struct sec_job_ring_t *internals;
1271
1272         PMD_INIT_FUNC_TRACE();
1273
1274         internals = dev->data->dev_private;
1275         sprintf(str, "ctx_pool_%d", dev->data->dev_id);
1276         if (!internals->ctx_pool) {
1277                 internals->ctx_pool = rte_mempool_create((const char *)str,
1278                                                 CTX_POOL_NUM_BUFS,
1279                                                 sizeof(struct caam_jr_op_ctx),
1280                                                 CTX_POOL_CACHE_SIZE, 0,
1281                                                 NULL, NULL, NULL, NULL,
1282                                                 SOCKET_ID_ANY, 0);
1283                 if (!internals->ctx_pool) {
1284                         CAAM_JR_ERR("%s create failed\n", str);
1285                         return -ENOMEM;
1286                 }
1287         } else
1288                 CAAM_JR_INFO("mempool already created for dev_id : %d",
1289                                 dev->data->dev_id);
1290
1291         return 0;
1292 }
1293
1294 static int
1295 caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
1296 {
1297         PMD_INIT_FUNC_TRACE();
1298         return 0;
1299 }
1300
1301 static void
1302 caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
1303 {
1304         PMD_INIT_FUNC_TRACE();
1305 }
1306
1307 static int
1308 caam_jr_dev_close(struct rte_cryptodev *dev)
1309 {
1310         struct sec_job_ring_t *internals;
1311
1312         PMD_INIT_FUNC_TRACE();
1313
1314         if (dev == NULL)
1315                 return -ENOMEM;
1316
1317         internals = dev->data->dev_private;
1318         rte_mempool_free(internals->ctx_pool);
1319         internals->ctx_pool = NULL;
1320
1321         return 0;
1322 }
1323
1324 static void
1325 caam_jr_dev_infos_get(struct rte_cryptodev *dev,
1326                        struct rte_cryptodev_info *info)
1327 {
1328         struct sec_job_ring_t *internals = dev->data->dev_private;
1329
1330         PMD_INIT_FUNC_TRACE();
1331         if (info != NULL) {
1332                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
1333                 info->feature_flags = dev->feature_flags;
1334                 info->capabilities = caam_jr_get_cryptodev_capabilities();
1335                 info->sym.max_nb_sessions = internals->max_nb_sessions;
1336                 info->driver_id = cryptodev_driver_id;
1337         }
1338 }
1339
/* Cryptodev operation table registered for this PMD; entries not listed
 * (e.g. stats) are left NULL and treated as unsupported by the framework.
 */
static struct rte_cryptodev_ops caam_jr_ops = {
	.dev_configure        = caam_jr_dev_configure,
	.dev_start            = caam_jr_dev_start,
	.dev_stop             = caam_jr_dev_stop,
	.dev_close            = caam_jr_dev_close,
	.dev_infos_get        = caam_jr_dev_infos_get,
	.queue_pair_setup     = caam_jr_queue_pair_setup,
	.queue_pair_release   = caam_jr_queue_pair_release,
	.queue_pair_count     = caam_jr_queue_pair_count,
	.sym_session_get_size = caam_jr_sym_session_get_size,
	.sym_session_configure = caam_jr_sym_session_configure,
	.sym_session_clear    = caam_jr_sym_session_clear
};
1353
1354
/* @brief Flush job rings of any processed descs.
 * The processed descs are silently dropped,
 * WITHOUT being notified to UA.
 * Afterwards the UIO resources and the DMA ring memory are released and
 * the slot is marked free (irq_fd == 0).
 */
static void
close_job_ring(struct sec_job_ring_t *job_ring)
{
	PMD_INIT_FUNC_TRACE();
	/* irq_fd != 0 means this slot holds a live, initialized ring */
	if (job_ring->irq_fd) {
		/* Producer index is frozen. If consumer index is not equal
		 * with producer index, then we have descs to flush.
		 */
		while (job_ring->pidx != job_ring->cidx)
			hw_flush_job_ring(job_ring, false, NULL);

		/* free the uio job ring */
		free_job_ring(job_ring->irq_fd);
		job_ring->irq_fd = 0;
		caam_jr_dma_free(job_ring->input_ring);
		caam_jr_dma_free(job_ring->output_ring);
		g_job_rings_no--;
	}
}
1378
/** @brief Release the software and hardware resources tied to a job ring:
 * reset the ring in hardware, turn off IRQ coalescing if it was enabled,
 * and disable interrupts unless the ring runs in pure poll mode.
 * @param [in] job_ring The job ring
 *
 * @retval  0 for success
 * @retval  -1 for error
 */
static int
shutdown_job_ring(struct sec_job_ring_t *job_ring)
{
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	ASSERT(job_ring != NULL);
	ret = hw_shutdown_job_ring(job_ring);
	SEC_ASSERT(ret == 0, ret,
		"Failed to shutdown hardware job ring %p",
		job_ring);

	if (job_ring->coalescing_en)
		hw_job_ring_disable_coalescing(job_ring);

	/* Poll mode never enabled IRQs, so nothing to disable there */
	if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
		ret = caam_jr_disable_irqs(job_ring->irq_fd);
		SEC_ASSERT(ret == 0, ret,
		"Failed to disable irqs for job ring %p",
		job_ring);
	}

	return ret;
}
1409
/*
 * @brief Release the resources used by the SEC user space driver.
 *
 * Reset and release SEC's job rings indicated by the User Application at
 * init_job_ring() and free any memory allocated internally.
 * Call once during application tear down.
 *
 * @note In case there are any descriptors in-flight (descriptors received by
 * SEC driver for processing and for which no response was yet provided to UA),
 * the descriptors are discarded without any notifications to User Application.
 *
 * @retval ::0                  is returned for a successful execution
 * @retval ::-1         is returned if SEC driver release is in progress
 */
static int
caam_jr_dev_uninit(struct rte_cryptodev *dev)
{
	struct sec_job_ring_t *internals;

	PMD_INIT_FUNC_TRACE();
	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	/* rte_free(NULL) is a safe no-op */
	rte_free(dev->security_ctx);

	/* If any descriptors in flight , poll and wait
	 * until all descriptors are received and silently discarded.
	 */
	if (internals) {
		shutdown_job_ring(internals);
		close_job_ring(internals);
		rte_mempool_free(internals->ctx_pool);
	}

	CAAM_JR_INFO("Closing crypto device %s", dev->data->name);

	/* Last caam jr instance: return the whole driver to idle state */
	if (g_job_rings_no == 0)
		g_driver_state = SEC_DRIVER_STATE_IDLE;

	return SEC_SUCCESS;
}
1453
1454 /* @brief Initialize the software and hardware resources tied to a job ring.
1455  * @param [in] jr_mode;         Model to be used by SEC Driver to receive
1456  *                              notifications from SEC.  Can be either
1457  *                              of the three: #SEC_NOTIFICATION_TYPE_NAPI
1458  *                              #SEC_NOTIFICATION_TYPE_IRQ or
1459  *                              #SEC_NOTIFICATION_TYPE_POLL
1460  * @param [in] NAPI_mode        The NAPI work mode to configure a job ring at
1461  *                              startup. Used only when #SEC_NOTIFICATION_TYPE
1462  *                              is set to #SEC_NOTIFICATION_TYPE_NAPI.
1463  * @param [in] irq_coalescing_timer This value determines the maximum
1464  *                                      amount of time after processing a
1465  *                                      descriptor before raising an interrupt.
1466  * @param [in] irq_coalescing_count This value determines how many
1467  *                                      descriptors are completed before
1468  *                                      raising an interrupt.
1469  * @param [in] reg_base_addr,   The job ring base address register
1470  * @param [in] irq_id           The job ring interrupt identification number.
1471  * @retval  job_ring_handle for successful job ring configuration
1472  * @retval  NULL on error
1473  *
1474  */
1475 static void *
1476 init_job_ring(void *reg_base_addr, uint32_t irq_id)
1477 {
1478         struct sec_job_ring_t *job_ring = NULL;
1479         int i, ret = 0;
1480         int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
1481         int napi_mode = 0;
1482         int irq_coalescing_timer = 0;
1483         int irq_coalescing_count = 0;
1484
1485         for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
1486                 if (g_job_rings[i].irq_fd == 0) {
1487                         job_ring = &g_job_rings[i];
1488                         g_job_rings_no++;
1489                         break;
1490                 }
1491         }
1492         if (job_ring == NULL) {
1493                 CAAM_JR_ERR("No free job ring\n");
1494                 return NULL;
1495         }
1496
1497         job_ring->register_base_addr = reg_base_addr;
1498         job_ring->jr_mode = jr_mode;
1499         job_ring->napi_mode = 0;
1500         job_ring->irq_fd = irq_id;
1501
1502         /* Allocate mem for input and output ring */
1503
1504         /* Allocate memory for input ring */
1505         job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
1506                                 SEC_DMA_MEM_INPUT_RING_SIZE);
1507         memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
1508
1509         /* Allocate memory for output ring */
1510         job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
1511                                 SEC_DMA_MEM_OUTPUT_RING_SIZE);
1512         memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
1513
1514         /* Reset job ring in SEC hw and configure job ring registers */
1515         ret = hw_reset_job_ring(job_ring);
1516         if (ret != 0) {
1517                 CAAM_JR_ERR("Failed to reset hardware job ring");
1518                 goto cleanup;
1519         }
1520
1521         if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
1522         /* When SEC US driver works in NAPI mode, the UA can select
1523          * if the driver starts with IRQs on or off.
1524          */
1525                 if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
1526                         CAAM_JR_INFO("Enabling DONE IRQ generationon job ring - %p",
1527                                 job_ring);
1528                         ret = caam_jr_enable_irqs(job_ring->irq_fd);
1529                         if (ret != 0) {
1530                                 CAAM_JR_ERR("Failed to enable irqs for job ring");
1531                                 goto cleanup;
1532                         }
1533                 }
1534         } else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
1535         /* When SEC US driver works in pure interrupt mode,
1536          * IRQ's are always enabled.
1537          */
1538                 CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
1539                          job_ring);
1540                 ret = caam_jr_enable_irqs(job_ring->irq_fd);
1541                 if (ret != 0) {
1542                         CAAM_JR_ERR("Failed to enable irqs for job ring");
1543                         goto cleanup;
1544                 }
1545         }
1546         if (irq_coalescing_timer || irq_coalescing_count) {
1547                 hw_job_ring_set_coalescing_param(job_ring,
1548                          irq_coalescing_timer,
1549                          irq_coalescing_count);
1550
1551                 hw_job_ring_enable_coalescing(job_ring);
1552                 job_ring->coalescing_en = 1;
1553         }
1554
1555         job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
1556         job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
1557         job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
1558
1559         return job_ring;
1560 cleanup:
1561         caam_jr_dma_free(job_ring->output_ring);
1562         caam_jr_dma_free(job_ring->input_ring);
1563         return NULL;
1564 }
1565
1566
1567 static int
1568 caam_jr_dev_init(const char *name,
1569                  struct rte_vdev_device *vdev,
1570                  struct rte_cryptodev_pmd_init_params *init_params)
1571 {
1572         struct rte_cryptodev *dev;
1573         struct uio_job_ring *job_ring;
1574         char str[RTE_CRYPTODEV_NAME_MAX_LEN];
1575
1576         PMD_INIT_FUNC_TRACE();
1577
1578         /* Validate driver state */
1579         if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
1580                 g_job_rings_max = sec_configure();
1581                 if (!g_job_rings_max) {
1582                         CAAM_JR_ERR("No job ring detected on UIO !!!!");
1583                         return -1;
1584                 }
1585                 /* Update driver state */
1586                 g_driver_state = SEC_DRIVER_STATE_STARTED;
1587         }
1588
1589         if (g_job_rings_no >= g_job_rings_max) {
1590                 CAAM_JR_ERR("No more job rings available max=%d!!!!",
1591                                 g_job_rings_max);
1592                 return -1;
1593         }
1594
1595         job_ring = config_job_ring();
1596         if (job_ring == NULL) {
1597                 CAAM_JR_ERR("failed to create job ring");
1598                 goto init_error;
1599         }
1600
1601         snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
1602
1603         dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
1604         if (dev == NULL) {
1605                 CAAM_JR_ERR("failed to create cryptodev vdev");
1606                 goto cleanup;
1607         }
1608         /*TODO free it during teardown*/
1609         dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
1610                                                 job_ring->uio_fd);
1611
1612         if (!dev->data->dev_private) {
1613                 CAAM_JR_ERR("Ring memory allocation failed\n");
1614                 goto cleanup2;
1615         }
1616
1617         dev->driver_id = cryptodev_driver_id;
1618         dev->dev_ops = &caam_jr_ops;
1619
1620         /* register rx/tx burst functions for data path */
1621         dev->dequeue_burst = caam_jr_dequeue_burst;
1622         dev->enqueue_burst = caam_jr_enqueue_burst;
1623         dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
1624                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
1625                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
1626                         RTE_CRYPTODEV_FF_SECURITY |
1627                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
1628                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
1629                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
1630                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
1631                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
1632
1633         /* For secondary processes, we don't initialise any further as primary
1634          * has already done this work. Only check we don't need a different
1635          * RX function
1636          */
1637         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1638                 CAAM_JR_WARN("Device already init by primary process");
1639                 return 0;
1640         }
1641
1642         RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name);
1643
1644         return 0;
1645
1646 cleanup2:
1647         caam_jr_dev_uninit(dev);
1648         rte_cryptodev_pmd_release_device(dev);
1649 cleanup:
1650         free_job_ring(job_ring->uio_fd);
1651 init_error:
1652         CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
1653                         init_params->name);
1654
1655         return -ENXIO;
1656 }
1657
1658 /** Initialise CAAM JR crypto device */
1659 static int
1660 cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
1661 {
1662         struct rte_cryptodev_pmd_init_params init_params = {
1663                 "",
1664                 sizeof(struct sec_job_ring_t),
1665                 rte_socket_id(),
1666                 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
1667         };
1668         const char *name;
1669         const char *input_args;
1670
1671         name = rte_vdev_device_name(vdev);
1672         if (name == NULL)
1673                 return -EINVAL;
1674
1675         input_args = rte_vdev_device_args(vdev);
1676         rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
1677
1678         /* if sec device version is not configured */
1679         if (!rta_get_sec_era()) {
1680                 const struct device_node *caam_node;
1681
1682                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
1683                         const uint32_t *prop = of_get_property(caam_node,
1684                                         "fsl,sec-era",
1685                                         NULL);
1686                         if (prop) {
1687                                 rta_set_sec_era(
1688                                         INTL_SEC_ERA(cpu_to_caam32(*prop)));
1689                                 break;
1690                         }
1691                 }
1692         }
1693 #ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
1694         if (rta_get_sec_era() > RTA_SEC_ERA_8) {
1695                 RTE_LOG(ERR, PMD,
1696                 "CAAM is compiled in BE mode for device with sec era > 8???\n");
1697                 return -EINVAL;
1698         }
1699 #endif
1700
1701         return caam_jr_dev_init(name, vdev, &init_params);
1702 }
1703
1704 /** Uninitialise CAAM JR crypto device */
1705 static int
1706 cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
1707 {
1708         struct rte_cryptodev *cryptodev;
1709         const char *name;
1710
1711         name = rte_vdev_device_name(vdev);
1712         if (name == NULL)
1713                 return -EINVAL;
1714
1715         cryptodev = rte_cryptodev_pmd_get_named_dev(name);
1716         if (cryptodev == NULL)
1717                 return -ENODEV;
1718
1719         caam_jr_dev_uninit(cryptodev);
1720
1721         return rte_cryptodev_pmd_destroy(cryptodev);
1722 }
1723
/* vdev bus hooks for this PMD: probe creates the cryptodev, remove tears
 * it down.
 */
static struct rte_vdev_driver cryptodev_caam_jr_drv = {
	.probe = cryptodev_caam_jr_probe,
	.remove = cryptodev_caam_jr_remove
};

/* Handle filled in by RTE_PMD_REGISTER_CRYPTO_DRIVER below. */
static struct cryptodev_driver caam_jr_crypto_drv;

/* Register the vdev driver, advertise its accepted device arguments, and
 * obtain the crypto driver id stored in cryptodev_driver_id.
 */
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
	"max_nb_queue_pairs=<int>"
	"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
		cryptodev_driver_id);
1737
1738 RTE_INIT(caam_jr_init_log)
1739 {
1740         caam_jr_logtype = rte_log_register("pmd.crypto.caam");
1741         if (caam_jr_logtype >= 0)
1742                 rte_log_set_level(caam_jr_logtype, RTE_LOG_NOTICE);
1743 }