drivers/crypto: return error for not supported SA lifetime
[dpdk.git] drivers/crypto/caam_jr/caam_jr.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2017-2019 NXP
3  */
4
5 #include <fcntl.h>
6 #include <unistd.h>
7 #include <sched.h>
8 #include <net/if.h>
9
10 #include <rte_byteorder.h>
11 #include <rte_common.h>
12 #include <cryptodev_pmd.h>
13 #include <rte_crypto.h>
14 #include <rte_cryptodev.h>
15 #include <rte_bus_vdev.h>
16 #include <rte_malloc.h>
17 #include <rte_security_driver.h>
18 #include <rte_hexdump.h>
19
20 #include <caam_jr_capabilities.h>
21 #include <caam_jr_config.h>
22 #include <caam_jr_hw_specific.h>
23 #include <caam_jr_pvt.h>
24 #include <caam_jr_desc.h>
25 #include <caam_jr_log.h>
26
27 /* RTA header files */
28 #include <desc/common.h>
29 #include <desc/algo.h>
30 #include <dpaa_of.h>
31 #ifdef RTE_LIBRTE_PMD_CAAM_JR_DEBUG
32 #define CAAM_JR_DBG    1
33 #else
34 #define CAAM_JR_DBG     0
35 #endif
36 #define CRYPTODEV_NAME_CAAM_JR_PMD      crypto_caam_jr
37 static uint8_t cryptodev_driver_id;
38
39 /* Lists the states possible for the SEC user space driver. */
40 enum sec_driver_state_e {
41         SEC_DRIVER_STATE_IDLE,          /* Driver not initialized */
42         SEC_DRIVER_STATE_STARTED,       /* Driver initialized and can be used*/
43         SEC_DRIVER_STATE_RELEASE,       /* Driver release is in progress */
44 };
45
46 /* Job rings used for communication with SEC HW */
47 static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
48
49 /* The current state of SEC user space driver */
50 static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;
51
52 /* The number of job rings used by SEC user space driver */
53 static int g_job_rings_no;
54 static int g_job_rings_max;
55
56 struct sec_outring_entry {
57         phys_addr_t desc;       /* Pointer to completed descriptor */
58         uint32_t status;        /* Status for completed descriptor */
59 } __rte_packed;
60
61 /* virtual address conversion when mempool support is available for ctx */
62 static inline phys_addr_t
63 caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
64 {
65         return (size_t)vaddr - ctx->vtop_offset;
66 }
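/*
 * Illustrative sketch (hypothetical addresses): vtop_offset is computed later
 * in caam_jr_alloc_ctx() as (size_t)ctx - rte_mempool_virt2iova(ctx), so for
 * any vaddr inside the ctx, vaddr - vtop_offset yields the IOVA/physical
 * address seen by the SEC hardware. For example, if ctx sat at virtual
 * 0x1000 with IOVA 0x100, vtop_offset would be 0xf00 and a vaddr of 0x1040
 * would map to 0x140.
 */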
67
68 static inline void
69 caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
70 {
71         /* report op status to sym->op and then free the ctx memory  */
72         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
73 }
74
75 static inline struct caam_jr_op_ctx *
76 caam_jr_alloc_ctx(struct caam_jr_session *ses)
77 {
78         struct caam_jr_op_ctx *ctx;
79         int ret;
80
81         ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
82         if (!ctx || ret) {
83                 CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
84                 return NULL;
85         }
86         /*
87          * Clear SG memory: there are 16 SG entries of 16 bytes each (256
88          * bytes in total). One call to dcbz_64() clears 64 bytes, hence it
89          * is called 4 times to clear all the SG entries. caam_jr_alloc_ctx()
90          * is called for each packet, and memset() is costlier than dcbz_64().
91          */
92         dcbz_64(&ctx->sg[SG_CACHELINE_0]);
93         dcbz_64(&ctx->sg[SG_CACHELINE_1]);
94         dcbz_64(&ctx->sg[SG_CACHELINE_2]);
95         dcbz_64(&ctx->sg[SG_CACHELINE_3]);
96
97         ctx->ctx_pool = ses->ctx_pool;
98         ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
99
100         return ctx;
101 }
102
103 static
104 void caam_jr_stats_get(struct rte_cryptodev *dev,
105                         struct rte_cryptodev_stats *stats)
106 {
107         struct caam_jr_qp **qp = (struct caam_jr_qp **)
108                                         dev->data->queue_pairs;
109         int i;
110
111         PMD_INIT_FUNC_TRACE();
112         if (stats == NULL) {
113                 CAAM_JR_ERR("Invalid stats ptr NULL");
114                 return;
115         }
116         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
117                 if (qp[i] == NULL) {
118                         CAAM_JR_WARN("Uninitialised queue pair");
119                         continue;
120                 }
121
122                 stats->enqueued_count += qp[i]->tx_pkts;
123                 stats->dequeued_count += qp[i]->rx_pkts;
124                 stats->enqueue_err_count += qp[i]->tx_errs;
125                 stats->dequeue_err_count += qp[i]->rx_errs;
126                 CAAM_JR_INFO("extra stats:\n\tRX Poll ERR = %" PRIu64
127                              "\n\tTX Ring Full = %" PRIu64,
128                              qp[i]->rx_poll_err,
129                              qp[i]->tx_ring_full);
130         }
131 }
132
133 static
134 void caam_jr_stats_reset(struct rte_cryptodev *dev)
135 {
136         int i;
137         struct caam_jr_qp **qp = (struct caam_jr_qp **)
138                                    (dev->data->queue_pairs);
139
140         PMD_INIT_FUNC_TRACE();
141         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
142                 if (qp[i] == NULL) {
143                         CAAM_JR_WARN("Uninitialised queue pair");
144                         continue;
145                 }
146                 qp[i]->rx_pkts = 0;
147                 qp[i]->rx_errs = 0;
148                 qp[i]->rx_poll_err = 0;
149                 qp[i]->tx_pkts = 0;
150                 qp[i]->tx_errs = 0;
151                 qp[i]->tx_ring_full = 0;
152         }
153 }
154
155 static inline int
156 is_cipher_only(struct caam_jr_session *ses)
157 {
158         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
159                 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
160 }
161
162 static inline int
163 is_auth_only(struct caam_jr_session *ses)
164 {
165         return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
166                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
167 }
168
169 static inline int
170 is_aead(struct caam_jr_session *ses)
171 {
172         return ((ses->cipher_alg == 0) &&
173                 (ses->auth_alg == 0) &&
174                 (ses->aead_alg != 0));
175 }
176
177 static inline int
178 is_auth_cipher(struct caam_jr_session *ses)
179 {
180         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
181                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
182                 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
183 }
184
185 static inline int
186 is_proto_ipsec(struct caam_jr_session *ses)
187 {
188         return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
189 }
190
191 static inline int
192 is_encode(struct caam_jr_session *ses)
193 {
194         return ses->dir == DIR_ENC;
195 }
196
197 static inline int
198 is_decode(struct caam_jr_session *ses)
199 {
200         return ses->dir == DIR_DEC;
201 }
202
203 static inline void
204 caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
205 {
206         switch (ses->auth_alg) {
207         case RTE_CRYPTO_AUTH_NULL:
208                 ses->digest_length = 0;
209                 break;
210         case RTE_CRYPTO_AUTH_MD5_HMAC:
211                 alginfo_a->algtype =
212                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
213                         OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
214                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
215                 break;
216         case RTE_CRYPTO_AUTH_SHA1_HMAC:
217                 alginfo_a->algtype =
218                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
219                         OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
220                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
221                 break;
222         case RTE_CRYPTO_AUTH_SHA224_HMAC:
223                 alginfo_a->algtype =
224                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
225                         OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
226                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
227                 break;
228         case RTE_CRYPTO_AUTH_SHA256_HMAC:
229                 alginfo_a->algtype =
230                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
231                         OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
232                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
233                 break;
234         case RTE_CRYPTO_AUTH_SHA384_HMAC:
235                 alginfo_a->algtype =
236                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
237                         OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
238                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
239                 break;
240         case RTE_CRYPTO_AUTH_SHA512_HMAC:
241                 alginfo_a->algtype =
242                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
243                         OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
244                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
245                 break;
246         default:
247                 CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
248         }
249 }
250
251 static inline void
252 caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
253 {
254         switch (ses->cipher_alg) {
255         case RTE_CRYPTO_CIPHER_NULL:
256                 break;
257         case RTE_CRYPTO_CIPHER_AES_CBC:
258                 alginfo_c->algtype =
259                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
260                         OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
261                 alginfo_c->algmode = OP_ALG_AAI_CBC;
262                 break;
263         case RTE_CRYPTO_CIPHER_3DES_CBC:
264                 alginfo_c->algtype =
265                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
266                         OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
267                 alginfo_c->algmode = OP_ALG_AAI_CBC;
268                 break;
269         case RTE_CRYPTO_CIPHER_AES_CTR:
270                 alginfo_c->algtype =
271                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
272                         OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
273                 alginfo_c->algmode = OP_ALG_AAI_CTR;
274                 break;
275         default:
276                 CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
277         }
278 }
279
280 static inline void
281 caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
282 {
283         switch (ses->aead_alg) {
284         case RTE_CRYPTO_AEAD_AES_GCM:
285                 alginfo->algtype = OP_ALG_ALGSEL_AES;
286                 alginfo->algmode = OP_ALG_AAI_GCM;
287                 break;
288         default:
289                 CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
290         }
291 }
292
293 /* prepare command block of the session */
294 static int
295 caam_jr_prep_cdb(struct caam_jr_session *ses)
296 {
297         struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
298         int32_t shared_desc_len = 0;
299         struct sec_cdb *cdb;
300         int err;
301 #if CAAM_BYTE_ORDER == CORE_BYTE_ORDER
302         int swap = false;
303 #else
304         int swap = true;
305 #endif
306
307         if (ses->cdb)
308                 caam_jr_dma_free(ses->cdb);
309
310         cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
311         if (!cdb) {
312                 CAAM_JR_ERR("failed to allocate memory for cdb\n");
313                 return -1;
314         }
315
316         ses->cdb = cdb;
317
318         memset(cdb, 0, sizeof(struct sec_cdb));
319
320         if (is_cipher_only(ses)) {
321                 caam_cipher_alg(ses, &alginfo_c);
322                 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
323                         CAAM_JR_ERR("not supported cipher alg");
324                         rte_free(cdb);
325                         return -ENOTSUP;
326                 }
327
328                 alginfo_c.key = (size_t)ses->cipher_key.data;
329                 alginfo_c.keylen = ses->cipher_key.length;
330                 alginfo_c.key_enc_flags = 0;
331                 alginfo_c.key_type = RTA_DATA_IMM;
332
333                 shared_desc_len = cnstr_shdsc_blkcipher(
334                                                 cdb->sh_desc, true,
335                                                 swap, SHR_NEVER, &alginfo_c,
336                                                 ses->iv.length,
337                                                 ses->dir);
338         } else if (is_auth_only(ses)) {
339                 caam_auth_alg(ses, &alginfo_a);
340                 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
341                         CAAM_JR_ERR("not supported auth alg");
342                         rte_free(cdb);
343                         return -ENOTSUP;
344                 }
345
346                 alginfo_a.key = (size_t)ses->auth_key.data;
347                 alginfo_a.keylen = ses->auth_key.length;
348                 alginfo_a.key_enc_flags = 0;
349                 alginfo_a.key_type = RTA_DATA_IMM;
350
351                 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
352                                                    swap, SHR_NEVER, &alginfo_a,
353                                                    !ses->dir,
354                                                    ses->digest_length);
355         } else if (is_aead(ses)) {
356                 caam_aead_alg(ses, &alginfo);
357                 if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
358                         CAAM_JR_ERR("not supported aead alg");
359                         rte_free(cdb);
360                         return -ENOTSUP;
361                 }
362                 alginfo.key = (size_t)ses->aead_key.data;
363                 alginfo.keylen = ses->aead_key.length;
364                 alginfo.key_enc_flags = 0;
365                 alginfo.key_type = RTA_DATA_IMM;
366
367                 if (ses->dir == DIR_ENC)
368                         shared_desc_len = cnstr_shdsc_gcm_encap(
369                                         cdb->sh_desc, true, swap,
370                                         SHR_NEVER, &alginfo,
371                                         ses->iv.length,
372                                         ses->digest_length);
373                 else
374                         shared_desc_len = cnstr_shdsc_gcm_decap(
375                                         cdb->sh_desc, true, swap,
376                                         SHR_NEVER, &alginfo,
377                                         ses->iv.length,
378                                         ses->digest_length);
379         } else {
380                 caam_cipher_alg(ses, &alginfo_c);
381                 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
382                         CAAM_JR_ERR("not supported cipher alg");
383                         rte_free(cdb);
384                         return -ENOTSUP;
385                 }
386
387                 alginfo_c.key = (size_t)ses->cipher_key.data;
388                 alginfo_c.keylen = ses->cipher_key.length;
389                 alginfo_c.key_enc_flags = 0;
390                 alginfo_c.key_type = RTA_DATA_IMM;
391
392                 caam_auth_alg(ses, &alginfo_a);
393                 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
394                         CAAM_JR_ERR("not supported auth alg");
395                         rte_free(cdb);
396                         return -ENOTSUP;
397                 }
398
399                 alginfo_a.key = (size_t)ses->auth_key.data;
400                 alginfo_a.keylen = ses->auth_key.length;
401                 alginfo_a.key_enc_flags = 0;
402                 alginfo_a.key_type = RTA_DATA_IMM;
403
404                 cdb->sh_desc[0] = alginfo_c.keylen;
405                 cdb->sh_desc[1] = alginfo_a.keylen;
406                 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
407                                        MIN_JOB_DESC_SIZE,
408                                        (unsigned int *)cdb->sh_desc,
409                                        &cdb->sh_desc[2], 2);
410
411                 if (err < 0) {
412                         CAAM_JR_ERR("Crypto: Incorrect key lengths");
413                         rte_free(cdb);
414                         return err;
415                 }
416                 if (cdb->sh_desc[2] & 1)
417                         alginfo_c.key_type = RTA_DATA_IMM;
418                 else {
419                         alginfo_c.key = (size_t)caam_jr_mem_vtop(
420                                                 (void *)(size_t)alginfo_c.key);
421                         alginfo_c.key_type = RTA_DATA_PTR;
422                 }
423                 if (cdb->sh_desc[2] & (1<<1))
424                         alginfo_a.key_type = RTA_DATA_IMM;
425                 else {
426                         alginfo_a.key = (size_t)caam_jr_mem_vtop(
427                                                 (void *)(size_t)alginfo_a.key);
428                         alginfo_a.key_type = RTA_DATA_PTR;
429                 }
430                 cdb->sh_desc[0] = 0;
431                 cdb->sh_desc[1] = 0;
432                 cdb->sh_desc[2] = 0;
433                 if (is_proto_ipsec(ses)) {
434                         if (ses->dir == DIR_ENC) {
435                                 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
436                                                 cdb->sh_desc,
437                                                 true, swap, SHR_SERIAL,
438                                                 &ses->encap_pdb,
439                                                 (uint8_t *)&ses->ip4_hdr,
440                                                 &alginfo_c, &alginfo_a);
441                         } else if (ses->dir == DIR_DEC) {
442                                 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
443                                                 cdb->sh_desc,
444                                                 true, swap, SHR_SERIAL,
445                                                 &ses->decap_pdb,
446                                                 &alginfo_c, &alginfo_a);
447                         }
448                 } else {
449                         /* Auth_only_len is overwritten in fd for each job */
450                         shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
451                                         true, swap, SHR_SERIAL,
452                                         &alginfo_c, &alginfo_a,
453                                         ses->iv.length,
454                                         ses->digest_length, ses->dir);
455                 }
456         }
457
458         if (shared_desc_len < 0) {
459                 CAAM_JR_ERR("error in preparing command block");
460                 return shared_desc_len;
461         }
462
463 #if CAAM_JR_DBG
464         SEC_DUMP_DESC(cdb->sh_desc);
465 #endif
466
467         cdb->sh_hdr.hi.field.idlen = shared_desc_len;
468
469         return 0;
470 }
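/*
 * Illustrative note: the CDB prepared above is later referenced from each job
 * descriptor by the build_*() helpers below, along the lines of:
 *
 *   sdesc_offset = (size_t)((char *)&cdb->sh_desc - (char *)cdb);
 *   SEC_JD_SET_SD(jobdescr,
 *                 (phys_addr_t)caam_jr_dma_vtop(cdb) + sdesc_offset,
 *                 cdb->sh_hdr.hi.field.idlen);
 */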
471
472 /* @brief Poll the HW for already processed jobs in the JR
473  * and silently discard the available jobs or notify them to UA
474  * with indicated error code.
475  *
476  * @param [in,out]  job_ring        The job ring to poll.
477  * @param [in]  do_notify           Can be #TRUE or #FALSE. Indicates if
478  *                                  descriptors are to be discarded
479  *                                  or notified to UA with given error_code.
480  * @param [out] notified_descs    Number of notified descriptors. Can be NULL
481  *                                      if do_notify is #FALSE
482  */
483 static void
484 hw_flush_job_ring(struct sec_job_ring_t *job_ring,
485                   uint32_t do_notify,
486                   uint32_t *notified_descs)
487 {
488         int32_t jobs_no_to_discard = 0;
489         int32_t discarded_descs_no = 0;
490
491         CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
492                 job_ring, job_ring->pidx, job_ring->cidx, do_notify);
493
494         jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);
495
496         /* Discard all jobs */
497         CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
498                   job_ring, job_ring->pidx, job_ring->cidx,
499                   jobs_no_to_discard);
500
501         while (jobs_no_to_discard > discarded_descs_no) {
502                 discarded_descs_no++;
503                 /* Increment the consumer index for the current job ring
504                  * and remove the entry from the hardware ring; the job is
505                  * being discarded, so nothing needs to be saved first.
506                  */
507                 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
508                                          SEC_JOB_RING_SIZE);
509
510                 hw_remove_entries(job_ring, 1);
511         }
512
513         if (do_notify == true) {
514                 ASSERT(notified_descs != NULL);
515                 *notified_descs = discarded_descs_no;
516         }
517 }
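/*
 * Illustrative note (assuming SEC_CIRCULAR_COUNTER() simply wraps at the
 * given ring size): with a hypothetical SEC_JOB_RING_SIZE of 4, the consumer
 * index above would advance 0 -> 1 -> 2 -> 3 -> 0 as completed entries are
 * discarded, which is what hw_get_no_finished_jobs() accounts against the
 * producer index.
 */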
518
519 /* @brief Poll the HW for already processed jobs in the JR
520  * and notify the available jobs to UA.
521  *
522  * @param [in]  job_ring        The job ring to poll.
523  * @param [in]  limit           The maximum number of jobs to notify.
524  *                              If set to negative value, all available jobs are
525  *                              notified.
526  *
527  * @retval >=0 for No of jobs notified to UA.
528  * @retval -1 for error
529  */
530 static int
531 hw_poll_job_ring(struct sec_job_ring_t *job_ring,
532                  struct rte_crypto_op **ops, int32_t limit,
533                  struct caam_jr_qp *jr_qp)
534 {
535         int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify*/
536         int32_t number_of_jobs_available = 0;
537         int32_t notified_descs_no = 0;
538         uint32_t sec_error_code = 0;
539         struct job_descriptor *current_desc;
540         phys_addr_t current_desc_addr;
541         phys_addr_t *temp_addr;
542         struct caam_jr_op_ctx *ctx;
543
544         /* TODO: check that the ops array has enough room */
545         /* check here if any JR error that cannot be written
546          * in the output status word has occurred
547          */
548         if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
549                 CAAM_JR_INFO("err received");
550                 sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
551                                         GET_JR_REG(JRINT, job_ring));
552                 if (unlikely(sec_error_code)) {
553                         hw_job_ring_error_print(job_ring, sec_error_code);
554                         return -1;
555                 }
556         }
557         /* compute the number of jobs available in the job ring based on the
558          * producer and consumer index values.
559          */
560         number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
561         /* Compute the number of notifications that need to be raised to UA
562          * If limit > total number of done jobs -> notify all done jobs
563          * If limit = 0 -> error
564          * If limit < total number of done jobs -> notify a number
565          * of done jobs equal to the limit
566          */
567         jobs_no_to_notify = (limit > number_of_jobs_available) ?
568                                 number_of_jobs_available : limit;
569         CAAM_JR_DP_DEBUG(
570                 "Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
571                 job_ring, job_ring->pidx, job_ring->cidx,
572                 limit, number_of_jobs_available, jobs_no_to_notify);
573
574         rte_smp_rmb();
575
576         while (jobs_no_to_notify > notified_descs_no) {
577                 static uint64_t false_alarm;
578                 static uint64_t real_poll;
579
580                 /* Get job status here */
581                 sec_error_code = job_ring->output_ring[job_ring->cidx].status;
582                 /* Get completed descriptor */
583                 temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
584                 current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);
585
586                 real_poll++;
587                 /* todo: check if this is a false alarm (no desc present) */
588                 if (!current_desc_addr) {
589                         false_alarm++;
590                         printf("false alarm %" PRIu64 " real %" PRIu64
591                                 " sec_err =0x%x cidx Index =0%d\n",
592                                 false_alarm, real_poll,
593                                 sec_error_code, job_ring->cidx);
594                         rte_panic("CAAM JR descriptor NULL");
595                         return notified_descs_no;
596                 }
597                 current_desc = (struct job_descriptor *)
598                                 caam_jr_dma_ptov(current_desc_addr);
599                 /* now increment the consumer index for the current job ring,
600                  * AFTER saving job in temporary location!
601                  */
602                 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
603                                  SEC_JOB_RING_SIZE);
604                 /* Signal that the job has been processed and the slot is free*/
605                 hw_remove_entries(job_ring, 1);
606                 /*TODO for multiple ops, packets*/
607                 ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
608                 if (unlikely(sec_error_code)) {
609                         CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n",
610                                 job_ring->cidx, sec_error_code);
611                         hw_handle_job_ring_error(job_ring, sec_error_code);
612                         //todo improve with exact errors
613                         ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
614                         jr_qp->rx_errs++;
615                 } else {
616                         ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
617 #if CAAM_JR_DBG
618                         if (ctx->op->sym->m_dst) {
619                                 rte_hexdump(stdout, "PROCESSED",
620                                 rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
621                                 rte_pktmbuf_data_len(ctx->op->sym->m_dst));
622                         } else {
623                                 rte_hexdump(stdout, "PROCESSED",
624                                 rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
625                                 rte_pktmbuf_data_len(ctx->op->sym->m_src));
626                         }
627 #endif
628                 }
629                 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
630                         struct ip *ip4_hdr;
631
632                         if (ctx->op->sym->m_dst) {
633                                 /*TODO check for ip header or other*/
634                                 ip4_hdr = (struct ip *)
635                                 rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*);
636                                 ctx->op->sym->m_dst->pkt_len =
637                                         rte_be_to_cpu_16(ip4_hdr->ip_len);
638                                 ctx->op->sym->m_dst->data_len =
639                                         rte_be_to_cpu_16(ip4_hdr->ip_len);
640                         } else {
641                                 ip4_hdr = (struct ip *)
642                                 rte_pktmbuf_mtod(ctx->op->sym->m_src, char*);
643                                 ctx->op->sym->m_src->pkt_len =
644                                         rte_be_to_cpu_16(ip4_hdr->ip_len);
645                                 ctx->op->sym->m_src->data_len =
646                                         rte_be_to_cpu_16(ip4_hdr->ip_len);
647                         }
648                 }
649                 *ops = ctx->op;
650                 caam_jr_op_ending(ctx);
651                 ops++;
652                 notified_descs_no++;
653         }
654         return notified_descs_no;
655 }
656
657 static uint16_t
658 caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
659                        uint16_t nb_ops)
660 {
661         struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
662         struct sec_job_ring_t *ring = jr_qp->ring;
663         int num_rx;
664         int ret;
665
666         CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);
667
668         /* Poll the job ring until either nb_ops completed jobs have been
669          * notified or no more completed jobs are available (nb_ops is
670          * unsigned, so a negative limit cannot occur here).
671          */
672
673         /* Run hw poll job ring */
674         num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
675         if (num_rx < 0) {
676                 CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
677                 return 0;
678         }
679
680         CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);
681
682         if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
683                 if (num_rx < nb_ops) {
684                         ret = caam_jr_enable_irqs(ring->irq_fd);
685                         SEC_ASSERT(ret == 0, ret,
686                         "Failed to enable irqs for job ring %p", ring);
687                 }
688         } else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
689
690                 /* Always enable IRQ generation when in pure IRQ mode */
691                 ret = caam_jr_enable_irqs(ring->irq_fd);
692                 SEC_ASSERT(ret == 0, ret,
693                         "Failed to enable irqs for job ring %p", ring);
694         }
695
696         jr_qp->rx_pkts += num_rx;
697
698         return num_rx;
699 }
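/*
 * Illustrative application-side usage (dev_id, qp_id and ops[] are
 * hypothetical): this handler is normally reached through the generic
 * cryptodev burst API, e.g.
 *
 *   uint16_t nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *                                                 ops, RTE_DIM(ops));
 *
 * paired with rte_cryptodev_enqueue_burst() on the submission side.
 */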
700
701 /**
702  * packet looks like:
703  *              |<----data_len------->|
704  *    |ip_header|ah_header|icv|payload|
705  *              ^
706  *              |
707  *         mbuf->pkt.data
708  */
709 static inline struct caam_jr_op_ctx *
710 build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
711 {
712         struct rte_crypto_sym_op *sym = op->sym;
713         struct rte_mbuf *mbuf = sym->m_src;
714         struct caam_jr_op_ctx *ctx;
715         struct sec4_sg_entry *sg;
716         int     length;
717         struct sec_cdb *cdb;
718         uint64_t sdesc_offset;
719         struct sec_job_descriptor_t *jobdescr;
720         uint8_t extra_segs;
721
722         if (is_decode(ses))
723                 extra_segs = 2;
724         else
725                 extra_segs = 1;
726
727         if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
728                 CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d",
729                                 MAX_SG_ENTRIES);
730                 return NULL;
731         }
732
733         ctx = caam_jr_alloc_ctx(ses);
734         if (!ctx)
735                 return NULL;
736
737         ctx->op = op;
738
739         cdb = ses->cdb;
740         sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
741
742         jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
743
744         SEC_JD_INIT(jobdescr);
745         SEC_JD_SET_SD(jobdescr,
746                 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
747                 cdb->sh_hdr.hi.field.idlen);
748
749         /* output */
750         SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
751                         0, ses->digest_length);
752
753         /*input */
754         sg = &ctx->sg[0];
755         length = sym->auth.data.length;
756         sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) + sym->auth.data.offset);
757         sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
758
759         /* Successive segs */
760         mbuf = mbuf->next;
761         while (mbuf) {
762                 sg++;
763                 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
764                 sg->len = cpu_to_caam32(mbuf->data_len);
765                 mbuf = mbuf->next;
766         }
767
768         if (is_decode(ses)) {
769                 /* digest verification case */
770                 sg++;
771                 /* hash result or digest, save digest first */
772                 rte_memcpy(ctx->digest, sym->auth.digest.data,
773                            ses->digest_length);
774 #if CAAM_JR_DBG
775                 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
776 #endif
777                 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
778                 sg->len = cpu_to_caam32(ses->digest_length);
779                 length += ses->digest_length;
780         } else {
781                 sg->len -= ses->digest_length;
782         }
783
784         /* last element*/
785         sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
786
787         SEC_JD_SET_IN_PTR(jobdescr,
788                 (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, length);
789         /* enabling sg list */
790         (jobdescr)->seq_in.command.word  |= 0x01000000;
791
792         return ctx;
793 }
794
795 static inline struct caam_jr_op_ctx *
796 build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
797 {
798         struct rte_crypto_sym_op *sym = op->sym;
799         struct caam_jr_op_ctx *ctx;
800         struct sec4_sg_entry *sg;
801         rte_iova_t start_addr;
802         struct sec_cdb *cdb;
803         uint64_t sdesc_offset;
804         struct sec_job_descriptor_t *jobdescr;
805
806         ctx = caam_jr_alloc_ctx(ses);
807         if (!ctx)
808                 return NULL;
809
810         ctx->op = op;
811
812         cdb = ses->cdb;
813         sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
814
815         start_addr = rte_pktmbuf_iova(sym->m_src);
816
817         jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
818
819         SEC_JD_INIT(jobdescr);
820         SEC_JD_SET_SD(jobdescr,
821                 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
822                 cdb->sh_hdr.hi.field.idlen);
823
824         /* output */
825         SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
826                         0, ses->digest_length);
827
828         /*input */
829         if (is_decode(ses)) {
830                 sg = &ctx->sg[0];
831                 SEC_JD_SET_IN_PTR(jobdescr,
832                         (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
833                         (sym->auth.data.length + ses->digest_length));
834                 /* enabling sg list */
835                 (jobdescr)->seq_in.command.word  |= 0x01000000;
836
837                 /* hash result or digest, save digest first */
838                 rte_memcpy(ctx->digest, sym->auth.digest.data,
839                            ses->digest_length);
840                 sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
841                 sg->len = cpu_to_caam32(sym->auth.data.length);
842
843 #if CAAM_JR_DBG
844                 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
845 #endif
846                 /* let's check digest by hw */
847                 sg++;
848                 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
849                 sg->len = cpu_to_caam32(ses->digest_length);
850                 /* last element*/
851                 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
852         } else {
853                 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
854                         sym->auth.data.offset, sym->auth.data.length);
855         }
856         return ctx;
857 }
858
859 static inline struct caam_jr_op_ctx *
860 build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
861 {
862         struct rte_crypto_sym_op *sym = op->sym;
863         struct rte_mbuf *mbuf = sym->m_src;
864         struct caam_jr_op_ctx *ctx;
865         struct sec4_sg_entry *sg, *in_sg;
866         int length;
867         struct sec_cdb *cdb;
868         uint64_t sdesc_offset;
869         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
870                         ses->iv.offset);
871         struct sec_job_descriptor_t *jobdescr;
872         uint8_t reg_segs;
873
874         if (sym->m_dst) {
875                 mbuf = sym->m_dst;
876                 reg_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
877         } else {
878                 mbuf = sym->m_src;
879                 reg_segs = mbuf->nb_segs * 2 + 2;
880         }
881
882         if (reg_segs > MAX_SG_ENTRIES) {
883                 CAAM_JR_DP_ERR("Cipher: Max sec segs supported is %d",
884                                 MAX_SG_ENTRIES);
885                 return NULL;
886         }
887
888         ctx = caam_jr_alloc_ctx(ses);
889         if (!ctx)
890                 return NULL;
891
892         ctx->op = op;
893         cdb = ses->cdb;
894         sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
895
896         jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
897
898         SEC_JD_INIT(jobdescr);
899         SEC_JD_SET_SD(jobdescr,
900                 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
901                 cdb->sh_hdr.hi.field.idlen);
902
903 #if CAAM_JR_DBG
904         CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
905                         sym->m_src->data_off, sym->cipher.data.offset,
906                         sym->cipher.data.length, ses->iv.length);
907 #endif
908         /* output */
909         if (sym->m_dst)
910                 mbuf = sym->m_dst;
911         else
912                 mbuf = sym->m_src;
913
914         sg = &ctx->sg[0];
915         length = sym->cipher.data.length;
916
917         sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
918                 + sym->cipher.data.offset);
919         sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
920
921         /* Successive segs */
922         mbuf = mbuf->next;
923         while (mbuf) {
924                 sg++;
925                 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
926                 sg->len = cpu_to_caam32(mbuf->data_len);
927                 mbuf = mbuf->next;
928         }
929         /* last element*/
930         sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
931
932         SEC_JD_SET_OUT_PTR(jobdescr,
933                         (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0,
934                         length);
935         /*enabling sg bit */
936         (jobdescr)->seq_out.command.word  |= 0x01000000;
937
938         /*input */
939         sg++;
940         mbuf = sym->m_src;
941         in_sg = sg;
942
943         length = sym->cipher.data.length + ses->iv.length;
944
945         /* IV */
946         sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
947         sg->len = cpu_to_caam32(ses->iv.length);
948
949         /* 1st seg */
950         sg++;
951         sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
952                                 + sym->cipher.data.offset);
953         sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
954
955         /* Successive segs */
956         mbuf = mbuf->next;
957         while (mbuf) {
958                 sg++;
959                 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
960                 sg->len = cpu_to_caam32(mbuf->data_len);
961                 mbuf = mbuf->next;
962         }
963         /* last element*/
964         sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
965
966
967         SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, in_sg), 0,
968                                 length);
969         /*enabling sg bit */
970         (jobdescr)->seq_in.command.word  |= 0x01000000;
971
972         return ctx;
973 }
974
975 static inline struct caam_jr_op_ctx *
976 build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
977 {
978         struct rte_crypto_sym_op *sym = op->sym;
979         struct caam_jr_op_ctx *ctx;
980         struct sec4_sg_entry *sg;
981         rte_iova_t src_start_addr, dst_start_addr;
982         struct sec_cdb *cdb;
983         uint64_t sdesc_offset;
984         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
985                         ses->iv.offset);
986         struct sec_job_descriptor_t *jobdescr;
987
988         ctx = caam_jr_alloc_ctx(ses);
989         if (!ctx)
990                 return NULL;
991
992         ctx->op = op;
993         cdb = ses->cdb;
994         sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
995
996         src_start_addr = rte_pktmbuf_iova(sym->m_src);
997         if (sym->m_dst)
998                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
999         else
1000                 dst_start_addr = src_start_addr;
1001
1002         jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1003
1004         SEC_JD_INIT(jobdescr);
1005         SEC_JD_SET_SD(jobdescr,
1006                 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1007                 cdb->sh_hdr.hi.field.idlen);
1008
1009 #if CAAM_JR_DBG
1010         CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
1011                         sym->m_src->data_off, sym->cipher.data.offset,
1012                         sym->cipher.data.length, ses->iv.length);
1013 #endif
1014         /* output */
1015         SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
1016                         sym->cipher.data.offset,
1017                         sym->cipher.data.length + ses->iv.length);
1018
1019         /*input */
1020         sg = &ctx->sg[0];
1021         SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
1022                                 sym->cipher.data.length + ses->iv.length);
1023         /*enabling sg bit */
1024         (jobdescr)->seq_in.command.word  |= 0x01000000;
1025
1026         sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1027         sg->len = cpu_to_caam32(ses->iv.length);
1028
1029         sg = &ctx->sg[1];
1030         sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
1031         sg->len = cpu_to_caam32(sym->cipher.data.length);
1032         /* last element*/
1033         sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1034
1035         return ctx;
1036 }
1037
1038 /* For decapsulation:
1039  *     Input:
1040  * +----+----------------+--------------------------------+-----+
1041  * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
1042  * +----+----------------+--------------------------------+-----+
1043  *     Output:
1044  * +--------------------------------+
1045  * | Decrypted & authenticated data |
1046  * +--------------------------------+
1047  */
1048
1049 static inline struct caam_jr_op_ctx *
1050 build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
1051 {
1052         struct rte_crypto_sym_op *sym = op->sym;
1053         struct caam_jr_op_ctx *ctx;
1054         struct sec4_sg_entry *sg, *out_sg, *in_sg;
1055         struct rte_mbuf *mbuf;
1056         uint32_t length = 0;
1057         struct sec_cdb *cdb;
1058         uint64_t sdesc_offset;
1059         uint8_t req_segs;
1060         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1061                         ses->iv.offset);
1062         struct sec_job_descriptor_t *jobdescr;
1063         uint16_t auth_hdr_len = sym->cipher.data.offset -
1064                         sym->auth.data.offset;
1065         uint16_t auth_tail_len = sym->auth.data.length -
1066                         sym->cipher.data.length - auth_hdr_len;
1067         uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
1068
1069         if (sym->m_dst) {
1070                 mbuf = sym->m_dst;
1071                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1072         } else {
1073                 mbuf = sym->m_src;
1074                 req_segs = mbuf->nb_segs * 2 + 3;
1075         }
1076
1077         if (req_segs > MAX_SG_ENTRIES) {
1078                 CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1079                                 MAX_SG_ENTRIES);
1080                 return NULL;
1081         }
1082
1083         ctx = caam_jr_alloc_ctx(ses);
1084         if (!ctx)
1085                 return NULL;
1086
1087         ctx->op = op;
1088         cdb = ses->cdb;
1089         sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1090
1091         jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1092
1093         SEC_JD_INIT(jobdescr);
1094         SEC_JD_SET_SD(jobdescr,
1095                 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1096                 cdb->sh_hdr.hi.field.idlen);
1097
1098         /* output */
1099         if (sym->m_dst)
1100                 mbuf = sym->m_dst;
1101         else
1102                 mbuf = sym->m_src;
1103
1104         out_sg = &ctx->sg[0];
1105         if (is_encode(ses))
1106                 length = sym->auth.data.length + ses->digest_length;
1107         else
1108                 length = sym->auth.data.length;
1109
1110         sg = &ctx->sg[0];
1111
1112         /* 1st seg */
1113         sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
1114                 + sym->auth.data.offset);
1115         sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
1116
1117         /* Successive segs */
1118         mbuf = mbuf->next;
1119         while (mbuf) {
1120                 sg++;
1121                 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
1122                 sg->len = cpu_to_caam32(mbuf->data_len);
1123                 mbuf = mbuf->next;
1124         }
1125
1126         if (is_encode(ses)) {
1127                 /* set auth output */
1128                 sg++;
1129                 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
1130                 sg->len = cpu_to_caam32(ses->digest_length);
1131         }
1132         /* last element*/
1133         sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1134
1135         SEC_JD_SET_OUT_PTR(jobdescr,
1136                            (uint64_t)caam_jr_dma_vtop(out_sg), 0, length);
1137         /* set sg bit */
1138         (jobdescr)->seq_out.command.word  |= 0x01000000;
1139
1140         /* input */
1141         sg++;
1142         mbuf = sym->m_src;
1143         in_sg = sg;
1144         if (is_encode(ses))
1145                 length = ses->iv.length + sym->auth.data.length;
1146         else
1147                 length = ses->iv.length + sym->auth.data.length
1148                                                 + ses->digest_length;
1149
1150         sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1151         sg->len = cpu_to_caam32(ses->iv.length);
1152
1153         sg++;
1154         /* 1st seg */
1155         sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
1156                 + sym->auth.data.offset);
1157         sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
1158
1159         /* Successive segs */
1160         mbuf = mbuf->next;
1161         while (mbuf) {
1162                 sg++;
1163                 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
1164                 sg->len = cpu_to_caam32(mbuf->data_len);
1165                 mbuf = mbuf->next;
1166         }
1167
1168         if (is_decode(ses)) {
1169                 sg++;
1170                 rte_memcpy(ctx->digest, sym->auth.digest.data,
1171                        ses->digest_length);
1172                 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
1173                 sg->len = cpu_to_caam32(ses->digest_length);
1174         }
1175         /* last element*/
1176         sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1177
1178         SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0,
1179                                 length);
1180         /* set sg bit */
1181         (jobdescr)->seq_in.command.word  |= 0x01000000;
1182         /* Auth_only_len is set to 0 in the shared descriptor and is
1183          * overwritten here in the job descriptor, which updates
1184          * the DPOVRD register.
1185          */
1186         if (auth_only_len)
1187                 /* override DPOVRD with the auth-only length */
1188                 (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
1189
1190         return ctx;
1191 }
1192
1193 static inline struct caam_jr_op_ctx *
1194 build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
1195 {
1196         struct rte_crypto_sym_op *sym = op->sym;
1197         struct caam_jr_op_ctx *ctx;
1198         struct sec4_sg_entry *sg;
1199         rte_iova_t src_start_addr, dst_start_addr;
1200         uint32_t length = 0;
1201         struct sec_cdb *cdb;
1202         uint64_t sdesc_offset;
1203         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1204                         ses->iv.offset);
1205         struct sec_job_descriptor_t *jobdescr;
1206         uint16_t auth_hdr_len = sym->cipher.data.offset -
1207                         sym->auth.data.offset;
1208         uint16_t auth_tail_len = sym->auth.data.length -
1209                         sym->cipher.data.length - auth_hdr_len;
1210         uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
1211
1212         src_start_addr = rte_pktmbuf_iova(sym->m_src);
1213         if (sym->m_dst)
1214                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1215         else
1216                 dst_start_addr = src_start_addr;
1217
1218         ctx = caam_jr_alloc_ctx(ses);
1219         if (!ctx)
1220                 return NULL;
1221
1222         ctx->op = op;
1223         cdb = ses->cdb;
1224         sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1225
1226         jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1227
1228         SEC_JD_INIT(jobdescr);
1229         SEC_JD_SET_SD(jobdescr,
1230                 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1231                 cdb->sh_hdr.hi.field.idlen);
1232
1233         /* input */
1234         sg = &ctx->sg[0];
1235         if (is_encode(ses)) {
1236                 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1237                 sg->len = cpu_to_caam32(ses->iv.length);
1238                 length += ses->iv.length;
1239
1240                 sg++;
1241                 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
1242                 sg->len = cpu_to_caam32(sym->auth.data.length);
1243                 length += sym->auth.data.length;
1244                 /* last element*/
1245                 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1246         } else {
1247                 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1248                 sg->len = cpu_to_caam32(ses->iv.length);
1249                 length += ses->iv.length;
1250
1251                 sg++;
1252                 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
1253                 sg->len = cpu_to_caam32(sym->auth.data.length);
1254                 length += sym->auth.data.length;
1255
1256                 rte_memcpy(ctx->digest, sym->auth.digest.data,
1257                        ses->digest_length);
1258                 sg++;
1259                 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
1260                 sg->len = cpu_to_caam32(ses->digest_length);
1261                 length += ses->digest_length;
1262                 /* last element*/
1263                 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1264         }
1265
1266         SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
1267                                 length);
1268         /* set sg bit */
1269         (jobdescr)->seq_in.command.word  |= 0x01000000;
1270
1271         /* output */
1272         sg = &ctx->sg[6];
1273
1274         sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
1275         sg->len = cpu_to_caam32(sym->cipher.data.length);
1276         length = sym->cipher.data.length;
1277
1278         if (is_encode(ses)) {
1279                 /* set auth output */
1280                 sg++;
1281                 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
1282                 sg->len = cpu_to_caam32(ses->digest_length);
1283                 length += ses->digest_length;
1284         }
1285         /* last element*/
1286         sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1287
1288         SEC_JD_SET_OUT_PTR(jobdescr,
1289                            (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
1290         /* set sg bit */
1291         (jobdescr)->seq_out.command.word  |= 0x01000000;
1292
1293         /* Auth_only_len is set to 0 in the shared descriptor and is
1294          * overwritten here in the job descriptor, which updates
1295          * the DPOVRD register.
1296          */
1297         if (auth_only_len)
1298                 /* override DPOVRD with the auth-only length */
1299                 (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
1300
1301         return ctx;
1302 }
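/*
 * Illustrative DPOVRD encoding (hypothetical offsets): with
 * auth.data.offset = 0, cipher.data.offset = 8 and no authenticated trailer,
 * auth_hdr_len = 8 and auth_tail_len = 0, so auth_only_len = (0 << 16) | 8
 * and the job descriptor writes 0x80000000 | 8 = 0x80000008 into DPOVRD.
 */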
1303
1304 static inline struct caam_jr_op_ctx *
1305 build_proto(struct rte_crypto_op *op, struct caam_jr_session *ses)
1306 {
1307         struct rte_crypto_sym_op *sym = op->sym;
1308         struct caam_jr_op_ctx *ctx = NULL;
1309         phys_addr_t src_start_addr, dst_start_addr;
1310         struct sec_cdb *cdb;
1311         uint64_t sdesc_offset;
1312         struct sec_job_descriptor_t *jobdescr;
1313
1314         ctx = caam_jr_alloc_ctx(ses);
1315         if (!ctx)
1316                 return NULL;
1317         ctx->op = op;
1318
1319         src_start_addr = rte_pktmbuf_iova(sym->m_src);
1320         if (sym->m_dst)
1321                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1322         else
1323                 dst_start_addr = src_start_addr;
1324
1325         cdb = ses->cdb;
1326         sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1327
1328         jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1329
1330         SEC_JD_INIT(jobdescr);
1331         SEC_JD_SET_SD(jobdescr,
1332                 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1333                         cdb->sh_hdr.hi.field.idlen);
1334
1335         /* output */
1336         SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 0,
1337                         sym->m_src->buf_len - sym->m_src->data_off);
1338         /* input */
1339         SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)src_start_addr, 0,
1340                         sym->m_src->pkt_len);
1341         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1342
1343         return ctx;
1344 }
1345
1346 static int
1347 caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
1348 {
1349         struct sec_job_ring_t *ring = qp->ring;
1350         struct caam_jr_session *ses;
1351         struct caam_jr_op_ctx *ctx = NULL;
1352         struct sec_job_descriptor_t *jobdescr __rte_unused;
1353 #if CAAM_JR_DBG
1354         int i;
1355 #endif
1356
1357         switch (op->sess_type) {
1358         case RTE_CRYPTO_OP_WITH_SESSION:
1359                 ses = (struct caam_jr_session *)
1360                 get_sym_session_private_data(op->sym->session,
1361                                         cryptodev_driver_id);
1362                 break;
1363         case RTE_CRYPTO_OP_SECURITY_SESSION:
1364                 ses = (struct caam_jr_session *)
1365                         get_sec_session_private_data(
1366                                         op->sym->sec_session);
1367                 break;
1368         default:
1369                 CAAM_JR_DP_ERR("sessionless crypto op not supported");
1370                 qp->tx_errs++;
1371                 return -1;
1372         }
1373
1374         if (unlikely(!ses->qp || ses->qp != qp)) {
1375                 CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp);
1376                 ses->qp = qp;
1377                 caam_jr_prep_cdb(ses);
1378         }
1379
1380         if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1381                 if (is_auth_cipher(ses))
1382                         ctx = build_cipher_auth(op, ses);
1383                 else if (is_aead(ses))
1384                         goto err1;
1385                 else if (is_auth_only(ses))
1386                         ctx = build_auth_only(op, ses);
1387                 else if (is_cipher_only(ses))
1388                         ctx = build_cipher_only(op, ses);
1389                 else if (is_proto_ipsec(ses))
1390                         ctx = build_proto(op, ses);
1391         } else {
1392                 if (is_auth_cipher(ses))
1393                         ctx = build_cipher_auth_sg(op, ses);
1394                 else if (is_aead(ses))
1395                         goto err1;
1396                 else if (is_auth_only(ses))
1397                         ctx = build_auth_only_sg(op, ses);
1398                 else if (is_cipher_only(ses))
1399                         ctx = build_cipher_only_sg(op, ses);
1400         }
1401 err1:
1402         if (unlikely(!ctx)) {
1403                 qp->tx_errs++;
1404                 CAAM_JR_ERR("not supported sec op");
1405                 return -1;
1406         }
1407 #if CAAM_JR_DBG
1408         if (is_decode(ses))
1409                 rte_hexdump(stdout, "DECODE",
1410                         rte_pktmbuf_mtod(op->sym->m_src, void *),
1411                         rte_pktmbuf_data_len(op->sym->m_src));
1412         else
1413                 rte_hexdump(stdout, "ENCODE",
1414                         rte_pktmbuf_mtod(op->sym->m_src, void *),
1415                         rte_pktmbuf_data_len(op->sym->m_src));
1416
1417         printf("\n JD before conversion\n");
1418         for (i = 0; i < 12; i++)
1419                 printf("\n 0x%08x", ctx->jobdes.desc[i]);
1420 #endif
1421
1422         CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
1423                       ring, ring->pidx, ring->cidx);
1424
1425         /* todo - do we want to retry */
1426         if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
1427                          SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
1428                 CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
1429                               ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
1430                 caam_jr_op_ending(ctx);
1431                 qp->tx_ring_full++;
1432                 return -EBUSY;
1433         }
1434
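        /* The CAAM expects descriptor words in its own byte order; when the
         * core and CAAM endianness differ, swap each job descriptor field
         * before handing the descriptor to hardware.
         */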
1435 #if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
1436         jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1437
1438         jobdescr->deschdr.command.word =
1439                 cpu_to_caam32(jobdescr->deschdr.command.word);
1440         jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
1441         jobdescr->seq_out.command.word =
1442                 cpu_to_caam32(jobdescr->seq_out.command.word);
1443         jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
1444         jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
1445         jobdescr->seq_in.command.word =
1446                 cpu_to_caam32(jobdescr->seq_in.command.word);
1447         jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
1448         jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
1449         jobdescr->load_dpovrd.command.word =
1450                 cpu_to_caam32(jobdescr->load_dpovrd.command.word);
1451         jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
1452 #endif
1453
1454         /* Set ptr in input ring to current descriptor  */
1455         sec_write_addr(&ring->input_ring[ring->pidx],
1456                         (phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
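        /* Ensure the descriptor pointer written to the input ring is visible
         * before hardware is notified of the new job below.
         */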
1457         rte_smp_wmb();
1458
1459         /* Notify HW that a new job is enqueued */
1460         hw_enqueue_desc_on_job_ring(ring);
1461
1462         /* increment the producer index for the current job ring */
1463         ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);
1464
1465         return 0;
1466 }
1467
1468 static uint16_t
1469 caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1470                        uint16_t nb_ops)
1471 {
1472         /* Enqueue the crypto ops on the given device queue pair */
1473         uint32_t loop;
1474         int32_t ret;
1475         struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
1476         uint16_t num_tx = 0;
1477         /* Prepare each op which is to be sent */
1478         for (loop = 0; loop < nb_ops; loop++) {
1479                 ret = caam_jr_enqueue_op(ops[loop], jr_qp);
1480                 if (!ret)
1481                         num_tx++;
1482         }
1483
1484         jr_qp->tx_pkts += num_tx;
1485
1486         return num_tx;
1487 }
1488
1489 /* Release queue pair */
1490 static int
1491 caam_jr_queue_pair_release(struct rte_cryptodev *dev,
1492                            uint16_t qp_id)
1493 {
1494         struct sec_job_ring_t *internals;
1495         struct caam_jr_qp *qp = NULL;
1496
1497         PMD_INIT_FUNC_TRACE();
1498         CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);
1499
1500         internals = dev->data->dev_private;
1501         if (qp_id >= internals->max_nb_queue_pairs) {
1502                 CAAM_JR_ERR("Max supported qpid %d",
1503                              internals->max_nb_queue_pairs);
1504                 return -EINVAL;
1505         }
1506
1507         qp = &internals->qps[qp_id];
1508         qp->ring = NULL;
1509         dev->data->queue_pairs[qp_id] = NULL;
1510
1511         return 0;
1512 }
1513
1514 /* Setup a queue pair */
1515 static int
1516 caam_jr_queue_pair_setup(
1517                 struct rte_cryptodev *dev, uint16_t qp_id,
1518                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1519                 __rte_unused int socket_id)
1520 {
1521         struct sec_job_ring_t *internals;
1522         struct caam_jr_qp *qp = NULL;
1523
1524         PMD_INIT_FUNC_TRACE();
1525         CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1526
1527         internals = dev->data->dev_private;
1528         if (qp_id >= internals->max_nb_queue_pairs) {
1529                 CAAM_JR_ERR("Max supported qpid %d",
1530                              internals->max_nb_queue_pairs);
1531                 return -EINVAL;
1532         }
1533
1534         qp = &internals->qps[qp_id];
1535         qp->ring = internals;
1536         dev->data->queue_pairs[qp_id] = qp;
1537
1538         return 0;
1539 }
1540
1541 /* Returns the size of the caam_jr session structure */
1542 static unsigned int
1543 caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1544 {
1545         PMD_INIT_FUNC_TRACE();
1546
1547         return sizeof(struct caam_jr_session);
1548 }
1549
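/* Store the cipher algorithm, IV and key from the xform into the session;
 * direction follows the cipher op (encrypt/decrypt).
 */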
1550 static int
1551 caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
1552                     struct rte_crypto_sym_xform *xform,
1553                     struct caam_jr_session *session)
1554 {
1555         session->cipher_alg = xform->cipher.algo;
1556         session->iv.length = xform->cipher.iv.length;
1557         session->iv.offset = xform->cipher.iv.offset;
1558         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1559                                                RTE_CACHE_LINE_SIZE);
1560         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1561                 CAAM_JR_ERR("No Memory for cipher key\n");
1562                 return -ENOMEM;
1563         }
1564         session->cipher_key.length = xform->cipher.key.length;
1565
1566         memcpy(session->cipher_key.data, xform->cipher.key.data,
1567                xform->cipher.key.length);
1568         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1569                         DIR_ENC : DIR_DEC;
1570
1571         return 0;
1572 }
1573
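/* Store the auth algorithm, key and digest length from the xform into the
 * session; direction follows the auth op (generate/verify).
 */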
1574 static int
1575 caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
1576                   struct rte_crypto_sym_xform *xform,
1577                   struct caam_jr_session *session)
1578 {
1579         session->auth_alg = xform->auth.algo;
1580         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1581                                              RTE_CACHE_LINE_SIZE);
1582         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1583                 CAAM_JR_ERR("No Memory for auth key\n");
1584                 return -ENOMEM;
1585         }
1586         session->auth_key.length = xform->auth.key.length;
1587         session->digest_length = xform->auth.digest_length;
1588
1589         memcpy(session->auth_key.data, xform->auth.key.data,
1590                xform->auth.key.length);
1591         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1592                         DIR_ENC : DIR_DEC;
1593
1594         return 0;
1595 }
1596
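/* Store the AEAD algorithm, IV, AAD length, key and digest length from the
 * xform into the session; direction follows the AEAD op (encrypt/decrypt).
 */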
1597 static int
1598 caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
1599                   struct rte_crypto_sym_xform *xform,
1600                   struct caam_jr_session *session)
1601 {
1602         session->aead_alg = xform->aead.algo;
1603         session->iv.length = xform->aead.iv.length;
1604         session->iv.offset = xform->aead.iv.offset;
1605         session->auth_only_len = xform->aead.aad_length;
1606         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1607                                              RTE_CACHE_LINE_SIZE);
1608         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1609                 CAAM_JR_ERR("No Memory for aead key\n");
1610                 return -ENOMEM;
1611         }
1612         session->aead_key.length = xform->aead.key.length;
1613         session->digest_length = xform->aead.digest_length;
1614
1615         memcpy(session->aead_key.data, xform->aead.key.data,
1616                xform->aead.key.length);
1617         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1618                         DIR_ENC : DIR_DEC;
1619
1620         return 0;
1621 }
1622
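/* Program a symmetric session from the xform chain: cipher only, auth only,
 * cipher+auth (or auth+cipher) and single AEAD chains are accepted; anything
 * else is rejected with -EINVAL.
 */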
1623 static int
1624 caam_jr_set_session_parameters(struct rte_cryptodev *dev,
1625                                struct rte_crypto_sym_xform *xform, void *sess)
1626 {
1627         struct sec_job_ring_t *internals = dev->data->dev_private;
1628         struct caam_jr_session *session = sess;
1629
1630         PMD_INIT_FUNC_TRACE();
1631
1632         if (unlikely(sess == NULL)) {
1633                 CAAM_JR_ERR("invalid session struct");
1634                 return -EINVAL;
1635         }
1636
1637         /* Default IV length = 0 */
1638         session->iv.length = 0;
1639
1640         /* Cipher Only */
1641         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1642                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1643                 caam_jr_cipher_init(dev, xform, session);
1644
1645         /* Authentication Only */
1646         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1647                    xform->next == NULL) {
1648                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1649                 caam_jr_auth_init(dev, xform, session);
1650
1651         /* Cipher then Authenticate */
1652         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1653                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1654                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1655                         caam_jr_cipher_init(dev, xform, session);
1656                         caam_jr_auth_init(dev, xform->next, session);
1657                 } else {
1658                         CAAM_JR_ERR("Not supported: Cipher decrypt then Auth");
1659                         goto err1;
1660                 }
1661
1662         /* Authenticate then Cipher */
1663         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1664                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1665                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1666                         caam_jr_auth_init(dev, xform, session);
1667                         caam_jr_cipher_init(dev, xform->next, session);
1668                 } else {
1669                         CAAM_JR_ERR("Not supported: Auth then Cipher");
1670                         goto err1;
1671                 }
1672
1673         /* AEAD operation for AES-GCM kind of Algorithms */
1674         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1675                    xform->next == NULL) {
1676                 caam_jr_aead_init(dev, xform, session);
1677
1678         } else {
1679                 CAAM_JR_ERR("Invalid crypto type");
1680                 return -EINVAL;
1681         }
1682         session->ctx_pool = internals->ctx_pool;
1683
1684         return 0;
1685
1686 err1:
1687         rte_free(session->cipher_key.data);
1688         rte_free(session->auth_key.data);
1689         memset(session, 0, sizeof(struct caam_jr_session));
1690
1691         return -EINVAL;
1692 }
1693
1694 static int
1695 caam_jr_sym_session_configure(struct rte_cryptodev *dev,
1696                               struct rte_crypto_sym_xform *xform,
1697                               struct rte_cryptodev_sym_session *sess,
1698                               struct rte_mempool *mempool)
1699 {
1700         void *sess_private_data;
1701         int ret;
1702
1703         PMD_INIT_FUNC_TRACE();
1704
1705         if (rte_mempool_get(mempool, &sess_private_data)) {
1706                 CAAM_JR_ERR("Couldn't get object from session mempool");
1707                 return -ENOMEM;
1708         }
1709
1710         memset(sess_private_data, 0, sizeof(struct caam_jr_session));
1711         ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
1712         if (ret != 0) {
1713                 CAAM_JR_ERR("failed to configure session parameters");
1714                 /* Return session to mempool */
1715                 rte_mempool_put(mempool, sess_private_data);
1716                 return ret;
1717         }
1718
1719         set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
1720
1721         return 0;
1722 }
1723
1724 /* Clear the memory of session so it doesn't leave key material behind */
1725 static void
1726 caam_jr_sym_session_clear(struct rte_cryptodev *dev,
1727                 struct rte_cryptodev_sym_session *sess)
1728 {
1729         uint8_t index = dev->driver_id;
1730         void *sess_priv = get_sym_session_private_data(sess, index);
1731         struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1732
1733         PMD_INIT_FUNC_TRACE();
1734
1735         if (sess_priv) {
1736                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1737
1738                 rte_free(s->cipher_key.data);
1739                 rte_free(s->auth_key.data);
1740                 memset(s, 0, sizeof(struct caam_jr_session));
1741                 set_sym_session_private_data(sess, index, NULL);
1742                 rte_mempool_put(sess_mp, sess_priv);
1743         }
1744 }
1745
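/* Build an IPsec lookaside-protocol session: copy the cipher/auth keys, map
 * the algorithms, and fill the encap PDB (with a prebuilt outer IPv4 header)
 * for egress or the decap PDB for ingress. SA byte/packet lifetime limits are
 * not supported and are rejected with -ENOTSUP.
 */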
1746 static int
1747 caam_jr_set_ipsec_session(struct rte_cryptodev *dev,
1748                           struct rte_security_session_conf *conf,
1749                           void *sess)
1750 {
1751         struct sec_job_ring_t *internals = dev->data->dev_private;
1752         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1753         struct rte_crypto_auth_xform *auth_xform;
1754         struct rte_crypto_cipher_xform *cipher_xform;
1755         struct caam_jr_session *session = (struct caam_jr_session *)sess;
1756
1757         PMD_INIT_FUNC_TRACE();
1758
1759         if (ipsec_xform->life.bytes_hard_limit != 0 ||
1760             ipsec_xform->life.bytes_soft_limit != 0 ||
1761             ipsec_xform->life.packets_hard_limit != 0 ||
1762             ipsec_xform->life.packets_soft_limit != 0)
1763                 return -ENOTSUP;
1764
1765         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1766                 cipher_xform = &conf->crypto_xform->cipher;
1767                 auth_xform = &conf->crypto_xform->next->auth;
1768         } else {
1769                 auth_xform = &conf->crypto_xform->auth;
1770                 cipher_xform = &conf->crypto_xform->next->cipher;
1771         }
1772         session->proto_alg = conf->protocol;
1773         session->cipher_key.data = rte_zmalloc(NULL,
1774                                                cipher_xform->key.length,
1775                                                RTE_CACHE_LINE_SIZE);
1776         if (session->cipher_key.data == NULL &&
1777                         cipher_xform->key.length > 0) {
1778                 CAAM_JR_ERR("No Memory for cipher key\n");
1779                 return -ENOMEM;
1780         }
1781
1782         session->cipher_key.length = cipher_xform->key.length;
1783         session->auth_key.data = rte_zmalloc(NULL,
1784                                         auth_xform->key.length,
1785                                         RTE_CACHE_LINE_SIZE);
1786         if (session->auth_key.data == NULL &&
1787                         auth_xform->key.length > 0) {
1788                 CAAM_JR_ERR("No Memory for auth key\n");
1789                 rte_free(session->cipher_key.data);
1790                 return -ENOMEM;
1791         }
1792         session->auth_key.length = auth_xform->key.length;
1793         memcpy(session->cipher_key.data, cipher_xform->key.data,
1794                         cipher_xform->key.length);
1795         memcpy(session->auth_key.data, auth_xform->key.data,
1796                         auth_xform->key.length);
1797
1798         switch (auth_xform->algo) {
1799         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1800                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1801                 break;
1802         case RTE_CRYPTO_AUTH_MD5_HMAC:
1803                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1804                 break;
1805         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1806                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1807                 break;
1808         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1809                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1810                 break;
1811         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1812                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1813                 break;
1814         case RTE_CRYPTO_AUTH_AES_CMAC:
1815                 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1816                 break;
1817         case RTE_CRYPTO_AUTH_NULL:
1818                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1819                 break;
1820         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1821         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1822         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1823         case RTE_CRYPTO_AUTH_SHA1:
1824         case RTE_CRYPTO_AUTH_SHA256:
1825         case RTE_CRYPTO_AUTH_SHA512:
1826         case RTE_CRYPTO_AUTH_SHA224:
1827         case RTE_CRYPTO_AUTH_SHA384:
1828         case RTE_CRYPTO_AUTH_MD5:
1829         case RTE_CRYPTO_AUTH_AES_GMAC:
1830         case RTE_CRYPTO_AUTH_KASUMI_F9:
1831         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1832         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1833                 CAAM_JR_ERR("Crypto: Unsupported auth alg %u\n",
1834                         auth_xform->algo);
1835                 goto out;
1836         default:
1837                 CAAM_JR_ERR("Crypto: Undefined Auth specified %u\n",
1838                         auth_xform->algo);
1839                 goto out;
1840         }
1841
1842         switch (cipher_xform->algo) {
1843         case RTE_CRYPTO_CIPHER_AES_CBC:
1844                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1845                 break;
1846         case RTE_CRYPTO_CIPHER_3DES_CBC:
1847                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1848                 break;
1849         case RTE_CRYPTO_CIPHER_AES_CTR:
1850                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1851                 break;
1852         case RTE_CRYPTO_CIPHER_NULL:
1853         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1854         case RTE_CRYPTO_CIPHER_3DES_ECB:
1855         case RTE_CRYPTO_CIPHER_AES_ECB:
1856         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1857                 CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u\n",
1858                         cipher_xform->algo);
1859                 goto out;
1860         default:
1861                 CAAM_JR_ERR("Crypto: Undefined Cipher specified %u\n",
1862                         cipher_xform->algo);
1863                 goto out;
1864         }
1865
1866         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1867                 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
1868                                 sizeof(session->ip4_hdr));
1869                 session->ip4_hdr.ip_v = IPVERSION;
1870                 session->ip4_hdr.ip_hl = 5;
1871                 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
1872                                                 sizeof(session->ip4_hdr));
1873                 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
1874                 session->ip4_hdr.ip_id = 0;
1875                 session->ip4_hdr.ip_off = 0;
1876                 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
1877                 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
1878                                 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
1879                                 : IPPROTO_AH;
1880                 session->ip4_hdr.ip_sum = 0;
1881                 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
1882                 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
1883                 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
1884                                                 (void *)&session->ip4_hdr,
1885                                                 sizeof(struct ip));
1886
1887                 session->encap_pdb.options =
1888                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
1889                         PDBOPTS_ESP_OIHI_PDB_INL |
1890                         PDBOPTS_ESP_IVSRC;
1891                 if (ipsec_xform->options.dec_ttl)
1892                         session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
1893                 if (ipsec_xform->options.esn)
1894                         session->encap_pdb.options |= PDBOPTS_ESP_ESN;
1895                 session->encap_pdb.spi = ipsec_xform->spi;
1896                 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
1897
1898                 session->dir = DIR_ENC;
1899         } else if (ipsec_xform->direction ==
1900                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1901                 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
1902                 session->decap_pdb.options = sizeof(struct ip) << 16;
1903                 if (ipsec_xform->options.esn)
1904                         session->decap_pdb.options |= PDBOPTS_ESP_ESN;
1905                 session->dir = DIR_DEC;
1906         } else
1907                 goto out;
1908         session->ctx_pool = internals->ctx_pool;
1909
1910         return 0;
1911 out:
1912         rte_free(session->auth_key.data);
1913         rte_free(session->cipher_key.data);
1914         memset(session, 0, sizeof(struct caam_jr_session));
1915         return -1;
1916 }
1917
1918 static int
1919 caam_jr_security_session_create(void *dev,
1920                                 struct rte_security_session_conf *conf,
1921                                 struct rte_security_session *sess,
1922                                 struct rte_mempool *mempool)
1923 {
1924         void *sess_private_data;
1925         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
1926         int ret;
1927
1928         if (rte_mempool_get(mempool, &sess_private_data)) {
1929                 CAAM_JR_ERR("Couldn't get object from session mempool");
1930                 return -ENOMEM;
1931         }
1932
1933         switch (conf->protocol) {
1934         case RTE_SECURITY_PROTOCOL_IPSEC:
1935                 ret = caam_jr_set_ipsec_session(cdev, conf,
1936                                 sess_private_data);
1937                 break;
1938         case RTE_SECURITY_PROTOCOL_MACSEC:
                 /* Return the unused object to the mempool before bailing out */
                 rte_mempool_put(mempool, sess_private_data);
1939                 return -ENOTSUP;
1940         default:
                 rte_mempool_put(mempool, sess_private_data);
1941                 return -EINVAL;
1942         }
1943         if (ret != 0) {
1944                 CAAM_JR_ERR("failed to configure session parameters");
1945                 /* Return session to mempool */
1946                 rte_mempool_put(mempool, sess_private_data);
1947                 return ret;
1948         }
1949
1950         set_sec_session_private_data(sess, sess_private_data);
1951
1952         return ret;
1953 }
1954
1955 /* Clear the memory of session so it doesn't leave key material behind */
1956 static int
1957 caam_jr_security_session_destroy(void *dev __rte_unused,
1958                                  struct rte_security_session *sess)
1959 {
1960         PMD_INIT_FUNC_TRACE();
1961         void *sess_priv = get_sec_session_private_data(sess);
1962
1963         struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1964
1965         if (sess_priv) {
1966                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1967
1968                 rte_free(s->cipher_key.data);
1969                 rte_free(s->auth_key.data);
1970                 memset(s, 0, sizeof(struct caam_jr_session));
1971                 set_sec_session_private_data(sess, NULL);
1972                 rte_mempool_put(sess_mp, sess_priv);
1973         }
1974         return 0;
1975 }
1976
1977
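/* Create the per-device context mempool on the first configure call; it is
 * reused unchanged if the device is reconfigured.
 */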
1978 static int
1979 caam_jr_dev_configure(struct rte_cryptodev *dev,
1980                        struct rte_cryptodev_config *config __rte_unused)
1981 {
1982         char str[20];
1983         struct sec_job_ring_t *internals;
1984
1985         PMD_INIT_FUNC_TRACE();
1986
1987         internals = dev->data->dev_private;
1988         snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
1989         if (!internals->ctx_pool) {
1990                 internals->ctx_pool = rte_mempool_create((const char *)str,
1991                                                 CTX_POOL_NUM_BUFS,
1992                                                 sizeof(struct caam_jr_op_ctx),
1993                                                 CTX_POOL_CACHE_SIZE, 0,
1994                                                 NULL, NULL, NULL, NULL,
1995                                                 SOCKET_ID_ANY, 0);
1996                 if (!internals->ctx_pool) {
1997                         CAAM_JR_ERR("%s create failed\n", str);
1998                         return -ENOMEM;
1999                 }
2000         } else
2001                 CAAM_JR_INFO("mempool already created for dev_id : %d",
2002                                 dev->data->dev_id);
2003
2004         return 0;
2005 }
2006
2007 static int
2008 caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
2009 {
2010         PMD_INIT_FUNC_TRACE();
2011         return 0;
2012 }
2013
2014 static void
2015 caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
2016 {
2017         PMD_INIT_FUNC_TRACE();
2018 }
2019
2020 static int
2021 caam_jr_dev_close(struct rte_cryptodev *dev)
2022 {
2023         struct sec_job_ring_t *internals;
2024
2025         PMD_INIT_FUNC_TRACE();
2026
2027         if (dev == NULL)
2028                 return -ENODEV;
2029
2030         internals = dev->data->dev_private;
2031         rte_mempool_free(internals->ctx_pool);
2032         internals->ctx_pool = NULL;
2033
2034         return 0;
2035 }
2036
2037 static void
2038 caam_jr_dev_infos_get(struct rte_cryptodev *dev,
2039                        struct rte_cryptodev_info *info)
2040 {
2041         struct sec_job_ring_t *internals = dev->data->dev_private;
2042
2043         PMD_INIT_FUNC_TRACE();
2044         if (info != NULL) {
2045                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2046                 info->feature_flags = dev->feature_flags;
2047                 info->capabilities = caam_jr_get_cryptodev_capabilities();
2048                 info->sym.max_nb_sessions = internals->max_nb_sessions;
2049                 info->driver_id = cryptodev_driver_id;
2050         }
2051 }
2052
2053 static struct rte_cryptodev_ops caam_jr_ops = {
2054         .dev_configure        = caam_jr_dev_configure,
2055         .dev_start            = caam_jr_dev_start,
2056         .dev_stop             = caam_jr_dev_stop,
2057         .dev_close            = caam_jr_dev_close,
2058         .dev_infos_get        = caam_jr_dev_infos_get,
2059         .stats_get            = caam_jr_stats_get,
2060         .stats_reset          = caam_jr_stats_reset,
2061         .queue_pair_setup     = caam_jr_queue_pair_setup,
2062         .queue_pair_release   = caam_jr_queue_pair_release,
2063         .sym_session_get_size = caam_jr_sym_session_get_size,
2064         .sym_session_configure = caam_jr_sym_session_configure,
2065         .sym_session_clear    = caam_jr_sym_session_clear
2066 };
2067
2068 static struct rte_security_ops caam_jr_security_ops = {
2069         .session_create = caam_jr_security_session_create,
2070         .session_update = NULL,
2071         .session_stats_get = NULL,
2072         .session_destroy = caam_jr_security_session_destroy,
2073         .set_pkt_metadata = NULL,
2074         .capabilities_get = caam_jr_get_security_capabilities
2075 };
2076
2077 /* @brief Flush job rings of any processed descs.
2078  * The processed descs are silently dropped,
2079  * without the UA being notified.
2080  */
2081 static void
2082 close_job_ring(struct sec_job_ring_t *job_ring)
2083 {
2084         if (job_ring->irq_fd != -1) {
2085                 /* Producer index is frozen. If consumer index is not equal
2086                  * with producer index, then we have descs to flush.
2087                  */
2088                 while (job_ring->pidx != job_ring->cidx)
2089                         hw_flush_job_ring(job_ring, false, NULL);
2090
2091                 /* free the uio job ring */
2092                 free_job_ring(job_ring->irq_fd);
2093                 job_ring->irq_fd = -1;
2094                 caam_jr_dma_free(job_ring->input_ring);
2095                 caam_jr_dma_free(job_ring->output_ring);
2096                 g_job_rings_no--;
2097         }
2098 }
2099
2100 /** @brief Release the software and hardware resources tied to a job ring.
2101  * @param [in] job_ring The job ring
2102  *
2103  * @retval  0 for success
2104  * @retval  -1 for error
2105  */
2106 static int
2107 shutdown_job_ring(struct sec_job_ring_t *job_ring)
2108 {
2109         int ret = 0;
2110
2111         PMD_INIT_FUNC_TRACE();
2112         ASSERT(job_ring != NULL);
2113         ret = hw_shutdown_job_ring(job_ring);
2114         SEC_ASSERT(ret == 0, ret,
2115                 "Failed to shutdown hardware job ring %p",
2116                 job_ring);
2117
2118         if (job_ring->coalescing_en)
2119                 hw_job_ring_disable_coalescing(job_ring);
2120
2121         if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
2122                 ret = caam_jr_disable_irqs(job_ring->irq_fd);
2123                 SEC_ASSERT(ret == 0, ret,
2124                 "Failed to disable irqs for job ring %p",
2125                 job_ring);
2126         }
2127
2128         return ret;
2129 }
2130
2131 /*
2132  * @brief Release the resources used by the SEC user space driver.
2133  *
2134  * Reset and release SEC's job rings indicated by the User Application at
2135  * init_job_ring() and free any memory allocated internally.
2136  * Call once during application tear down.
2137  *
2138  * @note In case there are any descriptors in-flight (descriptors received by
2139  * SEC driver for processing and for which no response was yet provided to UA),
2140  * the descriptors are discarded without any notifications to User Application.
2141  *
2142  * @retval 0            is returned for a successful execution
2143  * @retval -ENODEV      is returned if dev is NULL
2144  */
2145 static int
2146 caam_jr_dev_uninit(struct rte_cryptodev *dev)
2147 {
2148         struct sec_job_ring_t *internals;
2149
2150         PMD_INIT_FUNC_TRACE();
2151         if (dev == NULL)
2152                 return -ENODEV;
2153
2154         internals = dev->data->dev_private;
2155         rte_free(dev->security_ctx);
2156
2157         /* If any descriptors are in flight, poll and wait
2158          * until all of them are received and silently discarded.
2159          */
2160         if (internals) {
2161                 shutdown_job_ring(internals);
2162                 close_job_ring(internals);
2163                 rte_mempool_free(internals->ctx_pool);
2164         }
2165
2166         CAAM_JR_INFO("Closing crypto device %s", dev->data->name);
2167
2168         /* last caam jr instance */
2169         if (g_job_rings_no == 0)
2170                 g_driver_state = SEC_DRIVER_STATE_IDLE;
2171
2172         return SEC_SUCCESS;
2173 }
2174
2175 /* @brief Initialize the software and hardware resources tied to a job ring.
2176  *
2177  * The notification mode, NAPI mode and IRQ coalescing settings are currently
2178  * fixed inside this function: the ring is configured in
2179  * #SEC_NOTIFICATION_TYPE_POLL mode with IRQ coalescing disabled.
2180  *
2181  * @param [in] reg_base_addr    The job ring base address register
2182  * @param [in] irq_id           The job ring interrupt identification number
2183  * @retval  job ring handle for successful job ring configuration
2184  * @retval  NULL on error
2185  */
2196 static void *
2197 init_job_ring(void *reg_base_addr, int irq_id)
2198 {
2199         struct sec_job_ring_t *job_ring = NULL;
2200         int i, ret = 0;
2201         int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
2202         int napi_mode = 0;
2203         int irq_coalescing_timer = 0;
2204         int irq_coalescing_count = 0;
2205
2206         for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
2207                 if (g_job_rings[i].irq_fd == -1) {
2208                         job_ring = &g_job_rings[i];
2209                         g_job_rings_no++;
2210                         break;
2211                 }
2212         }
2213         if (job_ring == NULL) {
2214                 CAAM_JR_ERR("No free job ring\n");
2215                 return NULL;
2216         }
2217
2218         job_ring->register_base_addr = reg_base_addr;
2219         job_ring->jr_mode = jr_mode;
2220         job_ring->napi_mode = 0;
2221         job_ring->irq_fd = irq_id;
2222
2223         /* Allocate mem for input and output ring */
2224
2225         /* Allocate memory for input ring */
2226         job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2227                                 SEC_DMA_MEM_INPUT_RING_SIZE);
2228         memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
2229
2230         /* Allocate memory for output ring */
2231         job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2232                                 SEC_DMA_MEM_OUTPUT_RING_SIZE);
2233         memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
2234
2235         /* Reset job ring in SEC hw and configure job ring registers */
2236         ret = hw_reset_job_ring(job_ring);
2237         if (ret != 0) {
2238                 CAAM_JR_ERR("Failed to reset hardware job ring");
2239                 goto cleanup;
2240         }
2241
2242         if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
2243         /* When SEC US driver works in NAPI mode, the UA can select
2244          * if the driver starts with IRQs on or off.
2245          */
2246                 if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
2247                         CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2248                                 job_ring);
2249                         ret = caam_jr_enable_irqs(job_ring->irq_fd);
2250                         if (ret != 0) {
2251                                 CAAM_JR_ERR("Failed to enable irqs for job ring");
2252                                 goto cleanup;
2253                         }
2254                 }
2255         } else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
2256         /* When SEC US driver works in pure interrupt mode,
2257          * IRQ's are always enabled.
2258          */
2259                 CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2260                          job_ring);
2261                 ret = caam_jr_enable_irqs(job_ring->irq_fd);
2262                 if (ret != 0) {
2263                         CAAM_JR_ERR("Failed to enable irqs for job ring");
2264                         goto cleanup;
2265                 }
2266         }
2267         if (irq_coalescing_timer || irq_coalescing_count) {
2268                 hw_job_ring_set_coalescing_param(job_ring,
2269                          irq_coalescing_timer,
2270                          irq_coalescing_count);
2271
2272                 hw_job_ring_enable_coalescing(job_ring);
2273                 job_ring->coalescing_en = 1;
2274         }
2275
2276         job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
2277         job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
2278         job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
2279
2280         return job_ring;
2281 cleanup:
2282         caam_jr_dma_free(job_ring->output_ring);
2283         caam_jr_dma_free(job_ring->input_ring);
2284         return NULL;
2285 }
2286
2287
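/* Probe-time initialisation: configure the UIO job rings on first use, create
 * the cryptodev, initialise one job ring in poll mode for this device and
 * attach the rte_security context.
 */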
2288 static int
2289 caam_jr_dev_init(const char *name,
2290                  struct rte_vdev_device *vdev,
2291                  struct rte_cryptodev_pmd_init_params *init_params)
2292 {
2293         struct rte_cryptodev *dev;
2294         struct rte_security_ctx *security_instance;
2295         struct uio_job_ring *job_ring;
2296         char str[RTE_CRYPTODEV_NAME_MAX_LEN];
2297
2298         PMD_INIT_FUNC_TRACE();
2299
2300         /* Validate driver state */
2301         if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
2302                 g_job_rings_max = sec_configure();
2303                 if (!g_job_rings_max) {
2304                         CAAM_JR_ERR("No job ring detected on UIO !!!!");
2305                         return -1;
2306                 }
2307                 /* Update driver state */
2308                 g_driver_state = SEC_DRIVER_STATE_STARTED;
2309         }
2310
2311         if (g_job_rings_no >= g_job_rings_max) {
2312                 CAAM_JR_ERR("No more job rings available max=%d!!!!",
2313                                 g_job_rings_max);
2314                 return -1;
2315         }
2316
2317         job_ring = config_job_ring();
2318         if (job_ring == NULL) {
2319                 CAAM_JR_ERR("failed to create job ring");
2320                 goto init_error;
2321         }
2322
2323         snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
2324
2325         dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
2326         if (dev == NULL) {
2327                 CAAM_JR_ERR("failed to create cryptodev vdev");
2328                 goto cleanup;
2329         }
2330         /*TODO free it during teardown*/
2331         dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
2332                                                 job_ring->uio_fd);
2333
2334         if (!dev->data->dev_private) {
2335                 CAAM_JR_ERR("Ring memory allocation failed\n");
2336                 goto cleanup2;
2337         }
2338
2339         dev->driver_id = cryptodev_driver_id;
2340         dev->dev_ops = &caam_jr_ops;
2341
2342         /* register rx/tx burst functions for data path */
2343         dev->dequeue_burst = caam_jr_dequeue_burst;
2344         dev->enqueue_burst = caam_jr_enqueue_burst;
2345         dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2346                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
2347                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2348                         RTE_CRYPTODEV_FF_SECURITY |
2349                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2350                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2351                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2352                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2353                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2354
2355         /* For secondary processes, we don't initialise any further as the
2356          * primary process has already done this work.
2357          */
2359         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2360                 CAAM_JR_WARN("Device already init by primary process");
2361                 return 0;
2362         }
2363
2364         /*TODO free it during teardown*/
2365         security_instance = rte_malloc("caam_jr",
2366                                 sizeof(struct rte_security_ctx), 0);
2367         if (security_instance == NULL) {
2368                 CAAM_JR_ERR("memory allocation failed\n");
2369                 //todo error handling.
2370                 goto cleanup2;
2371         }
2372
2373         security_instance->device = (void *)dev;
2374         security_instance->ops = &caam_jr_security_ops;
2375         security_instance->sess_cnt = 0;
2376         dev->security_ctx = security_instance;
2377
2378         rte_cryptodev_pmd_probing_finish(dev);
2379
2380         RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name);
2381
2382         return 0;
2383
2384 cleanup2:
2385         caam_jr_dev_uninit(dev);
2386         rte_cryptodev_pmd_release_device(dev);
2387 cleanup:
2388         free_job_ring(job_ring->uio_fd);
2389 init_error:
2390         CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
2391                         init_params->name);
2392
2393         return -ENXIO;
2394 }
2395
2396 /** Initialise CAAM JR crypto device */
2397 static int
2398 cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
2399 {
2400         int ret;
2401
2402         struct rte_cryptodev_pmd_init_params init_params = {
2403                 "",
2404                 sizeof(struct sec_job_ring_t),
2405                 rte_socket_id(),
2406                 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
2407         };
2408         const char *name;
2409         const char *input_args;
2410
2411         name = rte_vdev_device_name(vdev);
2412         if (name == NULL)
2413                 return -EINVAL;
2414
2415         input_args = rte_vdev_device_args(vdev);
2416         rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
2417
2418         ret = of_init();
2419         if (ret) {
2420                 RTE_LOG(ERR, PMD,
2421                 "of_init failed\n");
2422                 return -EINVAL;
2423         }
2424         /* if sec device version is not configured */
2425         if (!rta_get_sec_era()) {
2426                 const struct device_node *caam_node;
2427
2428                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2429                         const uint32_t *prop = of_get_property(caam_node,
2430                                         "fsl,sec-era",
2431                                         NULL);
2432                         if (prop) {
2433                                 rta_set_sec_era(
2434                                         INTL_SEC_ERA(rte_be_to_cpu_32(*prop)));
2435                                 break;
2436                         }
2437                 }
2438         }
2439 #ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
2440         if (rta_get_sec_era() > RTA_SEC_ERA_8) {
2441                 RTE_LOG(ERR, PMD,
2442                 "CAAM is compiled in BE mode for device with sec era > 8???\n");
2443                 return -EINVAL;
2444         }
2445 #endif
2446
2447         return caam_jr_dev_init(name, vdev, &init_params);
2448 }
2449
2450 /** Uninitialise CAAM JR crypto device */
2451 static int
2452 cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
2453 {
2454         struct rte_cryptodev *cryptodev;
2455         const char *name;
2456
2457         name = rte_vdev_device_name(vdev);
2458         if (name == NULL)
2459                 return -EINVAL;
2460
2461         cryptodev = rte_cryptodev_pmd_get_named_dev(name);
2462         if (cryptodev == NULL)
2463                 return -ENODEV;
2464
2465         caam_jr_dev_uninit(cryptodev);
2466
2467         return rte_cryptodev_pmd_destroy(cryptodev);
2468 }
2469
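/* Mark every job ring slot as unused (irq_fd = -1) so init_job_ring() can
 * claim free entries later.
 */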
2470 static void
2471 sec_job_rings_init(void)
2472 {
2473         int i;
2474
2475         for (i = 0; i < MAX_SEC_JOB_RINGS; i++)
2476                 g_job_rings[i].irq_fd = -1;
2477 }
2478
2479 static struct rte_vdev_driver cryptodev_caam_jr_drv = {
2480         .probe = cryptodev_caam_jr_probe,
2481         .remove = cryptodev_caam_jr_remove
2482 };
2483
2484 static struct cryptodev_driver caam_jr_crypto_drv;
2485
2486 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
2487 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
2488         "max_nb_queue_pairs=<int>"
2489         "socket_id=<int>");
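/* Typical usage (illustrative, assuming the default vdev naming): the PMD can
 * be instantiated from the EAL command line, e.g.
 * "--vdev=crypto_caam_jr,max_nb_queue_pairs=<n>,socket_id=<id>".
 */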
2490 RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
2491                 cryptodev_driver_id);
2492
2493 RTE_INIT(caam_jr_init)
2494 {
2495         sec_uio_job_rings_init();
2496         sec_job_rings_init();
2497 }
2498
2499 RTE_LOG_REGISTER(caam_jr_logtype, pmd.crypto.caam, NOTICE);