net/ice: rework for generic flow enabling
[dpdk.git] / drivers / crypto / caam_jr / caam_jr.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2017-2019 NXP
3  */
4
5 #include <fcntl.h>
6 #include <unistd.h>
7 #include <sched.h>
8 #include <net/if.h>
9
10 #include <rte_byteorder.h>
11 #include <rte_common.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_crypto.h>
14 #include <rte_cryptodev.h>
15 #include <rte_bus_vdev.h>
16 #include <rte_malloc.h>
17 #include <rte_security_driver.h>
18 #include <rte_hexdump.h>
19
20 #include <caam_jr_capabilities.h>
21 #include <caam_jr_config.h>
22 #include <caam_jr_hw_specific.h>
23 #include <caam_jr_pvt.h>
24 #include <caam_jr_desc.h>
25 #include <caam_jr_log.h>
26
27 /* RTA header files */
28 #include <desc/common.h>
29 #include <desc/algo.h>
30 #include <dpaa_of.h>
31 #ifdef RTE_LIBRTE_PMD_CAAM_JR_DEBUG
32 #define CAAM_JR_DBG    1
33 #else
34 #define CAAM_JR_DBG     0
35 #endif
36 #define CRYPTODEV_NAME_CAAM_JR_PMD      crypto_caam_jr
37 static uint8_t cryptodev_driver_id;
38 int caam_jr_logtype;
39
40 enum rta_sec_era rta_sec_era;
41
/* Lists the states possible for the SEC user space driver.
 * The current state is tracked in the file-scope g_driver_state.
 */
enum sec_driver_state_e {
	SEC_DRIVER_STATE_IDLE,		/* Driver not initialized */
	SEC_DRIVER_STATE_STARTED,	/* Driver initialized and can be used */
	SEC_DRIVER_STATE_RELEASE,	/* Driver release is in progress */
};
48
49 /* Job rings used for communication with SEC HW */
50 static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
51
52 /* The current state of SEC user space driver */
53 static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;
54
55 /* The number of job rings used by SEC user space driver */
56 static int g_job_rings_no;
57 static int g_job_rings_max;
58
/* One entry of the job-ring output ring. The poll path reads the
 * completed-descriptor address and its status word from here, so the
 * struct is packed to match the in-memory layout the SEC block writes
 * (see hw_poll_job_ring()).
 */
struct sec_outring_entry {
	phys_addr_t desc;	/* Pointer to completed descriptor */
	uint32_t status;	/* Status for completed descriptor */
} __rte_packed;
63
/* Virtual address conversion when mempool support is available for ctx.
 * Translates a virtual address inside the ctx object to its IOVA using
 * the delta cached at allocation time.
 */
static inline phys_addr_t
caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
{
	/* vtop_offset = virt(ctx) - iova(ctx) (set in caam_jr_alloc_ctx),
	 * so subtracting it from any vaddr within ctx yields its IOVA.
	 */
	return (size_t)vaddr - ctx->vtop_offset;
}
70
71 static inline void
72 caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
73 {
74         /* report op status to sym->op and then free the ctx memory  */
75         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
76 }
77
/* Allocate a per-operation context from the session's mempool and zero
 * its scatter-gather table. Returns NULL if the pool is exhausted.
 */
static inline struct caam_jr_op_ctx *
caam_jr_alloc_ctx(struct caam_jr_session *ses)
{
	struct caam_jr_op_ctx *ctx;
	int ret;

	ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || ret) {
		CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
	 * one call to dcbz_64() clear 64 bytes, hence calling it 4 times
	 * to clear all the SG entries. caam_jr_alloc_ctx() is called for
	 * each packet, memset is costlier than dcbz_64().
	 */
	dcbz_64(&ctx->sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	/* Cache virt-minus-IOVA delta so the datapath can translate
	 * addresses inside this ctx cheaply (see caam_jr_vtop_ctx()).
	 */
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
105
106 static
107 void caam_jr_stats_get(struct rte_cryptodev *dev,
108                         struct rte_cryptodev_stats *stats)
109 {
110         struct caam_jr_qp **qp = (struct caam_jr_qp **)
111                                         dev->data->queue_pairs;
112         int i;
113
114         PMD_INIT_FUNC_TRACE();
115         if (stats == NULL) {
116                 CAAM_JR_ERR("Invalid stats ptr NULL");
117                 return;
118         }
119         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
120                 if (qp[i] == NULL) {
121                         CAAM_JR_WARN("Uninitialised queue pair");
122                         continue;
123                 }
124
125                 stats->enqueued_count += qp[i]->tx_pkts;
126                 stats->dequeued_count += qp[i]->rx_pkts;
127                 stats->enqueue_err_count += qp[i]->tx_errs;
128                 stats->dequeue_err_count += qp[i]->rx_errs;
129                 CAAM_JR_INFO("extra stats:\n\tRX Poll ERR = %" PRIu64
130                              "\n\tTX Ring Full = %" PRIu64,
131                              qp[i]->rx_poll_err,
132                              qp[i]->tx_ring_full);
133         }
134 }
135
136 static
137 void caam_jr_stats_reset(struct rte_cryptodev *dev)
138 {
139         int i;
140         struct caam_jr_qp **qp = (struct caam_jr_qp **)
141                                    (dev->data->queue_pairs);
142
143         PMD_INIT_FUNC_TRACE();
144         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
145                 if (qp[i] == NULL) {
146                         CAAM_JR_WARN("Uninitialised queue pair");
147                         continue;
148                 }
149                 qp[i]->rx_pkts = 0;
150                 qp[i]->rx_errs = 0;
151                 qp[i]->rx_poll_err = 0;
152                 qp[i]->tx_pkts = 0;
153                 qp[i]->tx_errs = 0;
154                 qp[i]->tx_ring_full = 0;
155         }
156 }
157
158 static inline int
159 is_cipher_only(struct caam_jr_session *ses)
160 {
161         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
162                 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
163 }
164
165 static inline int
166 is_auth_only(struct caam_jr_session *ses)
167 {
168         return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
169                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
170 }
171
172 static inline int
173 is_aead(struct caam_jr_session *ses)
174 {
175         return ((ses->cipher_alg == 0) &&
176                 (ses->auth_alg == 0) &&
177                 (ses->aead_alg != 0));
178 }
179
180 static inline int
181 is_auth_cipher(struct caam_jr_session *ses)
182 {
183         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
184                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
185                 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
186 }
187
188 static inline int
189 is_proto_ipsec(struct caam_jr_session *ses)
190 {
191         return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
192 }
193
194 static inline int
195 is_encode(struct caam_jr_session *ses)
196 {
197         return ses->dir == DIR_ENC;
198 }
199
200 static inline int
201 is_decode(struct caam_jr_session *ses)
202 {
203         return ses->dir == DIR_DEC;
204 }
205
206 static inline void
207 caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
208 {
209         switch (ses->auth_alg) {
210         case RTE_CRYPTO_AUTH_NULL:
211                 ses->digest_length = 0;
212                 break;
213         case RTE_CRYPTO_AUTH_MD5_HMAC:
214                 alginfo_a->algtype =
215                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
216                         OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
217                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
218                 break;
219         case RTE_CRYPTO_AUTH_SHA1_HMAC:
220                 alginfo_a->algtype =
221                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
222                         OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
223                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
224                 break;
225         case RTE_CRYPTO_AUTH_SHA224_HMAC:
226                 alginfo_a->algtype =
227                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
228                         OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
229                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
230                 break;
231         case RTE_CRYPTO_AUTH_SHA256_HMAC:
232                 alginfo_a->algtype =
233                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
234                         OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
235                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
236                 break;
237         case RTE_CRYPTO_AUTH_SHA384_HMAC:
238                 alginfo_a->algtype =
239                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
240                         OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
241                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
242                 break;
243         case RTE_CRYPTO_AUTH_SHA512_HMAC:
244                 alginfo_a->algtype =
245                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
246                         OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
247                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
248                 break;
249         default:
250                 CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
251         }
252 }
253
254 static inline void
255 caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
256 {
257         switch (ses->cipher_alg) {
258         case RTE_CRYPTO_CIPHER_NULL:
259                 break;
260         case RTE_CRYPTO_CIPHER_AES_CBC:
261                 alginfo_c->algtype =
262                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
263                         OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
264                 alginfo_c->algmode = OP_ALG_AAI_CBC;
265                 break;
266         case RTE_CRYPTO_CIPHER_3DES_CBC:
267                 alginfo_c->algtype =
268                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
269                         OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
270                 alginfo_c->algmode = OP_ALG_AAI_CBC;
271                 break;
272         case RTE_CRYPTO_CIPHER_AES_CTR:
273                 alginfo_c->algtype =
274                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
275                         OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
276                 alginfo_c->algmode = OP_ALG_AAI_CTR;
277                 break;
278         default:
279                 CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
280         }
281 }
282
283 static inline void
284 caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
285 {
286         switch (ses->aead_alg) {
287         case RTE_CRYPTO_AEAD_AES_GCM:
288                 alginfo->algtype = OP_ALG_ALGSEL_AES;
289                 alginfo->algmode = OP_ALG_AAI_GCM;
290                 break;
291         default:
292                 CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
293         }
294 }
295
296 /* prepare command block of the session */
297 static int
298 caam_jr_prep_cdb(struct caam_jr_session *ses)
299 {
300         struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
301         int32_t shared_desc_len = 0;
302         struct sec_cdb *cdb;
303         int err;
304 #if CAAM_BYTE_ORDER == CORE_BYTE_ORDER
305         int swap = false;
306 #else
307         int swap = true;
308 #endif
309
310         if (ses->cdb)
311                 caam_jr_dma_free(ses->cdb);
312
313         cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
314         if (!cdb) {
315                 CAAM_JR_ERR("failed to allocate memory for cdb\n");
316                 return -1;
317         }
318
319         ses->cdb = cdb;
320
321         memset(cdb, 0, sizeof(struct sec_cdb));
322
323         if (is_cipher_only(ses)) {
324                 caam_cipher_alg(ses, &alginfo_c);
325                 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
326                         CAAM_JR_ERR("not supported cipher alg");
327                         rte_free(cdb);
328                         return -ENOTSUP;
329                 }
330
331                 alginfo_c.key = (size_t)ses->cipher_key.data;
332                 alginfo_c.keylen = ses->cipher_key.length;
333                 alginfo_c.key_enc_flags = 0;
334                 alginfo_c.key_type = RTA_DATA_IMM;
335
336                 shared_desc_len = cnstr_shdsc_blkcipher(
337                                                 cdb->sh_desc, true,
338                                                 swap, SHR_NEVER, &alginfo_c,
339                                                 NULL,
340                                                 ses->iv.length,
341                                                 ses->dir);
342         } else if (is_auth_only(ses)) {
343                 caam_auth_alg(ses, &alginfo_a);
344                 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
345                         CAAM_JR_ERR("not supported auth alg");
346                         rte_free(cdb);
347                         return -ENOTSUP;
348                 }
349
350                 alginfo_a.key = (size_t)ses->auth_key.data;
351                 alginfo_a.keylen = ses->auth_key.length;
352                 alginfo_a.key_enc_flags = 0;
353                 alginfo_a.key_type = RTA_DATA_IMM;
354
355                 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
356                                                    swap, SHR_NEVER, &alginfo_a,
357                                                    !ses->dir,
358                                                    ses->digest_length);
359         } else if (is_aead(ses)) {
360                 caam_aead_alg(ses, &alginfo);
361                 if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
362                         CAAM_JR_ERR("not supported aead alg");
363                         rte_free(cdb);
364                         return -ENOTSUP;
365                 }
366                 alginfo.key = (size_t)ses->aead_key.data;
367                 alginfo.keylen = ses->aead_key.length;
368                 alginfo.key_enc_flags = 0;
369                 alginfo.key_type = RTA_DATA_IMM;
370
371                 if (ses->dir == DIR_ENC)
372                         shared_desc_len = cnstr_shdsc_gcm_encap(
373                                         cdb->sh_desc, true, swap,
374                                         SHR_NEVER, &alginfo,
375                                         ses->iv.length,
376                                         ses->digest_length);
377                 else
378                         shared_desc_len = cnstr_shdsc_gcm_decap(
379                                         cdb->sh_desc, true, swap,
380                                         SHR_NEVER, &alginfo,
381                                         ses->iv.length,
382                                         ses->digest_length);
383         } else {
384                 caam_cipher_alg(ses, &alginfo_c);
385                 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
386                         CAAM_JR_ERR("not supported cipher alg");
387                         rte_free(cdb);
388                         return -ENOTSUP;
389                 }
390
391                 alginfo_c.key = (size_t)ses->cipher_key.data;
392                 alginfo_c.keylen = ses->cipher_key.length;
393                 alginfo_c.key_enc_flags = 0;
394                 alginfo_c.key_type = RTA_DATA_IMM;
395
396                 caam_auth_alg(ses, &alginfo_a);
397                 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
398                         CAAM_JR_ERR("not supported auth alg");
399                         rte_free(cdb);
400                         return -ENOTSUP;
401                 }
402
403                 alginfo_a.key = (size_t)ses->auth_key.data;
404                 alginfo_a.keylen = ses->auth_key.length;
405                 alginfo_a.key_enc_flags = 0;
406                 alginfo_a.key_type = RTA_DATA_IMM;
407
408                 cdb->sh_desc[0] = alginfo_c.keylen;
409                 cdb->sh_desc[1] = alginfo_a.keylen;
410                 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
411                                        MIN_JOB_DESC_SIZE,
412                                        (unsigned int *)cdb->sh_desc,
413                                        &cdb->sh_desc[2], 2);
414
415                 if (err < 0) {
416                         CAAM_JR_ERR("Crypto: Incorrect key lengths");
417                         rte_free(cdb);
418                         return err;
419                 }
420                 if (cdb->sh_desc[2] & 1)
421                         alginfo_c.key_type = RTA_DATA_IMM;
422                 else {
423                         alginfo_c.key = (size_t)caam_jr_mem_vtop(
424                                                 (void *)(size_t)alginfo_c.key);
425                         alginfo_c.key_type = RTA_DATA_PTR;
426                 }
427                 if (cdb->sh_desc[2] & (1<<1))
428                         alginfo_a.key_type = RTA_DATA_IMM;
429                 else {
430                         alginfo_a.key = (size_t)caam_jr_mem_vtop(
431                                                 (void *)(size_t)alginfo_a.key);
432                         alginfo_a.key_type = RTA_DATA_PTR;
433                 }
434                 cdb->sh_desc[0] = 0;
435                 cdb->sh_desc[1] = 0;
436                 cdb->sh_desc[2] = 0;
437                 if (is_proto_ipsec(ses)) {
438                         if (ses->dir == DIR_ENC) {
439                                 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
440                                                 cdb->sh_desc,
441                                                 true, swap, SHR_SERIAL,
442                                                 &ses->encap_pdb,
443                                                 (uint8_t *)&ses->ip4_hdr,
444                                                 &alginfo_c, &alginfo_a);
445                         } else if (ses->dir == DIR_DEC) {
446                                 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
447                                                 cdb->sh_desc,
448                                                 true, swap, SHR_SERIAL,
449                                                 &ses->decap_pdb,
450                                                 &alginfo_c, &alginfo_a);
451                         }
452                 } else {
453                         /* Auth_only_len is overwritten in fd for each job */
454                         shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
455                                         true, swap, SHR_SERIAL,
456                                         &alginfo_c, &alginfo_a,
457                                         ses->iv.length,
458                                         ses->digest_length, ses->dir);
459                 }
460         }
461
462         if (shared_desc_len < 0) {
463                 CAAM_JR_ERR("error in preparing command block");
464                 return shared_desc_len;
465         }
466
467 #if CAAM_JR_DBG
468         SEC_DUMP_DESC(cdb->sh_desc);
469 #endif
470
471         cdb->sh_hdr.hi.field.idlen = shared_desc_len;
472
473         return 0;
474 }
475
476 /* @brief Poll the HW for already processed jobs in the JR
477  * and silently discard the available jobs or notify them to UA
478  * with indicated error code.
479  *
480  * @param [in,out]  job_ring        The job ring to poll.
481  * @param [in]  do_notify           Can be #TRUE or #FALSE. Indicates if
482  *                                  descriptors are to be discarded
483  *                                  or notified to UA with given error_code.
484  * @param [out] notified_descs    Number of notified descriptors. Can be NULL
485  *                                      if do_notify is #FALSE
486  */
487 static void
488 hw_flush_job_ring(struct sec_job_ring_t *job_ring,
489                   uint32_t do_notify,
490                   uint32_t *notified_descs)
491 {
492         int32_t jobs_no_to_discard = 0;
493         int32_t discarded_descs_no = 0;
494
495         CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
496                 job_ring, job_ring->pidx, job_ring->cidx, do_notify);
497
498         jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);
499
500         /* Discard all jobs */
501         CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
502                   job_ring, job_ring->pidx, job_ring->cidx,
503                   jobs_no_to_discard);
504
505         while (jobs_no_to_discard > discarded_descs_no) {
506                 discarded_descs_no++;
507                 /* Now increment the consumer index for the current job ring,
508                  * AFTER saving job in temporary location!
509                  * Increment the consumer index for the current job ring
510                  */
511                 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
512                                          SEC_JOB_RING_SIZE);
513
514                 hw_remove_entries(job_ring, 1);
515         }
516
517         if (do_notify == true) {
518                 ASSERT(notified_descs != NULL);
519                 *notified_descs = discarded_descs_no;
520         }
521 }
522
/* @brief Poll the HW for already processed jobs in the JR
 * and notify the available jobs to UA.
 *
 * @param [in]  job_ring        The job ring to poll.
 * @param [out] ops             Array that receives the completed crypto ops.
 * @param [in]  limit           The maximum number of jobs to notify.
 *                              If set to negative value, all available jobs are
 *                              notified.
 * @param [in,out] jr_qp        Queue pair whose rx_errs counter is bumped
 *                              on per-descriptor errors.
 *
 * @retval >=0 for No of jobs notified to UA.
 * @retval -1 for error
 */
static int
hw_poll_job_ring(struct sec_job_ring_t *job_ring,
		 struct rte_crypto_op **ops, int32_t limit,
		 struct caam_jr_qp *jr_qp)
{
	int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify*/
	int32_t number_of_jobs_available = 0;
	int32_t notified_descs_no = 0;
	uint32_t sec_error_code = 0;
	struct job_descriptor *current_desc;
	phys_addr_t current_desc_addr;
	phys_addr_t *temp_addr;
	struct caam_jr_op_ctx *ctx;

	/* TODO check for ops have memory*/
	/* check here if any JR error that cannot be written
	 * in the output status word has occurred
	 */
	if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
		CAAM_JR_INFO("err received");
		sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
					GET_JR_REG(JRINT, job_ring));
		if (unlikely(sec_error_code)) {
			hw_job_ring_error_print(job_ring, sec_error_code);
			return -1;
		}
	}
	/* compute the number of jobs available in the job ring based on the
	 * producer and consumer index values.
	 */
	number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
	/* Compute the number of notifications that need to be raised to UA
	 * If limit > total number of done jobs -> notify all done jobs
	 * If limit = 0 -> error
	 * If limit < total number of done jobs -> notify a number
	 * of done jobs equal with limit
	 */
	jobs_no_to_notify = (limit > number_of_jobs_available) ?
				number_of_jobs_available : limit;
	CAAM_JR_DP_DEBUG(
		"Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
		job_ring, job_ring->pidx, job_ring->cidx,
		limit, number_of_jobs_available, jobs_no_to_notify);

	/* Read barrier: make sure the output ring entries written by HW are
	 * observed before we read status/descriptor below.
	 */
	rte_smp_rmb();

	while (jobs_no_to_notify > notified_descs_no) {
		static uint64_t false_alarm;
		static uint64_t real_poll;

		/* Get job status here */
		sec_error_code = job_ring->output_ring[job_ring->cidx].status;
		/* Get completed descriptor */
		temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
		current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);

		real_poll++;
		/* todo check if it is false alarm no desc present */
		if (!current_desc_addr) {
			false_alarm++;
			printf("false alarm %" PRIu64 "real %" PRIu64
				" sec_err =0x%x cidx Index =0%d\n",
				false_alarm, real_poll,
				sec_error_code, job_ring->cidx);
			/* A NULL completed-descriptor address is treated as
			 * fatal; the return below is unreachable after the
			 * panic but kept for safety.
			 */
			rte_panic("CAAM JR descriptor NULL");
			return notified_descs_no;
		}
		current_desc = (struct job_descriptor *)
				caam_jr_dma_ptov(current_desc_addr);
		/* now increment the consumer index for the current job ring,
		 * AFTER saving job in temporary location!
		 */
		job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
				 SEC_JOB_RING_SIZE);
		/* Signal that the job has been processed and the slot is free*/
		hw_remove_entries(job_ring, 1);
		/*TODO for multiple ops, packets*/
		/* Recover the op context embedding this job descriptor. */
		ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
		if (unlikely(sec_error_code)) {
			CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n",
				job_ring->cidx, sec_error_code);
			hw_handle_job_ring_error(job_ring, sec_error_code);
			//todo improve with exact errors
			ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			jr_qp->rx_errs++;
		} else {
			ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
#if CAAM_JR_DBG
			if (ctx->op->sym->m_dst) {
				rte_hexdump(stdout, "PROCESSED",
				rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
				rte_pktmbuf_data_len(ctx->op->sym->m_dst));
			} else {
				rte_hexdump(stdout, "PROCESSED",
				rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
				rte_pktmbuf_data_len(ctx->op->sym->m_src));
			}
#endif
		}
		if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct ip *ip4_hdr;

			/* For security (IPsec) sessions the mbuf lengths are
			 * rewritten from the total-length field of the packet's
			 * leading IP header. NOTE(review): assumes the payload
			 * starts with an IPv4 header — see the inline TODO.
			 */
			if (ctx->op->sym->m_dst) {
				/*TODO check for ip header or other*/
				ip4_hdr = (struct ip *)
				rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*);
				ctx->op->sym->m_dst->pkt_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
				ctx->op->sym->m_dst->data_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
			} else {
				ip4_hdr = (struct ip *)
				rte_pktmbuf_mtod(ctx->op->sym->m_src, char*);
				ctx->op->sym->m_src->pkt_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
				ctx->op->sym->m_src->data_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
			}
		}
		/* Hand the op to the caller, then recycle its context. */
		*ops = ctx->op;
		caam_jr_op_ending(ctx);
		ops++;
		notified_descs_no++;
	}
	return notified_descs_no;
}
660
661 static uint16_t
662 caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
663                        uint16_t nb_ops)
664 {
665         struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
666         struct sec_job_ring_t *ring = jr_qp->ring;
667         int num_rx;
668         int ret;
669
670         CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);
671
672         /* Poll job ring
673          * If nb_ops < 0 -> poll JR until no more notifications are available.
674          * If nb_ops > 0 -> poll JR until limit is reached.
675          */
676
677         /* Run hw poll job ring */
678         num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
679         if (num_rx < 0) {
680                 CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
681                 return 0;
682         }
683
684         CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);
685
686         if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
687                 if (num_rx < nb_ops) {
688                         ret = caam_jr_enable_irqs(ring->irq_fd);
689                         SEC_ASSERT(ret == 0, ret,
690                         "Failed to enable irqs for job ring %p", ring);
691                 }
692         } else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
693
694                 /* Always enable IRQ generation when in pure IRQ mode */
695                 ret = caam_jr_enable_irqs(ring->irq_fd);
696                 SEC_ASSERT(ret == 0, ret,
697                         "Failed to enable irqs for job ring %p", ring);
698         }
699
700         jr_qp->rx_pkts += num_rx;
701
702         return num_rx;
703 }
704
/**
 * Build a job descriptor for an auth-only operation on a multi-segment
 * (scatter-gather) mbuf.
 *
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 *
 * Returns a filled op context, or NULL if the segment count exceeds the
 * SG table or no context could be allocated.
 */
static inline struct caam_jr_op_ctx *
build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	int	length;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	struct sec_job_descriptor_t *jobdescr;
	uint8_t extra_segs;

	/* Decode (verify) needs one extra SG entry for the saved digest. */
	if (is_decode(ses))
		extra_segs = 2;
	else
		extra_segs = 1;

	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
		CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;

	/* Physical address of the shared descriptor inside the session cdb. */
	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* output */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
			0, ses->digest_length);

	/*input */
	/* First SG entry: auth region of the first segment. */
	sg = &ctx->sg[0];
	length = sym->auth.data.length;
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) + sym->auth.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		/* digest verification case */
		sg++;
		/* hash result or digest, save digest first */
		rte_memcpy(ctx->digest, sym->auth.digest.data,
			   ses->digest_length);
#if CAAM_JR_DBG
		rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
#endif
		/* Feed the expected digest in as the final input entry so the
		 * engine can compare it.
		 */
		sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
		length += ses->digest_length;
	} else {
		/* Encode: exclude the digest bytes from the last segment. */
		sg->len -= ses->digest_length;
	}

	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_IN_PTR(jobdescr,
		(uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, length);
	/* enabling sg list */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	return ctx;
}
798
/**
 * Build a job descriptor for an auth-only (hash/HMAC) operation on a
 * contiguous (single-segment) mbuf.
 *
 * @param op	symmetric crypto operation to process
 * @param ses	session holding the prepared CDB and auth parameters
 * @return	per-op context with the filled job descriptor, or NULL on
 *		context allocation failure
 */
static inline struct caam_jr_op_ctx *
build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t start_addr;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	struct sec_job_descriptor_t *jobdescr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;

	cdb = ses->cdb;
	/* Byte offset of the shared descriptor within the CDB structure */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	start_addr = rte_pktmbuf_iova(sym->m_src);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* output: computed digest is written straight to the op's buffer */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
			0, ses->digest_length);

	/*input */
	if (is_decode(ses)) {
		/* Verification: build a 2-entry SG list (data + reference
		 * ICV) so the hardware can compare the computed digest.
		 */
		sg = &ctx->sg[0];
		SEC_JD_SET_IN_PTR(jobdescr,
			(uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
			(sym->auth.data.length + ses->digest_length));
		/* enabling sg list */
		(jobdescr)->seq_in.command.word  |= 0x01000000;

		/* hash result or digest, save digest first */
		rte_memcpy(ctx->digest, sym->auth.digest.data,
			   ses->digest_length);
		sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);

#if CAAM_JR_DBG
		rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
#endif
		/* let's check digest by hw */
		sg++;
		sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
		/* last element*/
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	} else {
		/* Generation: contiguous input, no SG list required */
		SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
			sym->auth.data.offset, sym->auth.data.length);
	}
	return ctx;
}
862
/**
 * Build a job descriptor for a cipher-only operation on scatter-gather
 * (multi-segment) mbufs; supports out-of-place operation (m_dst set).
 *
 * @param op	symmetric crypto operation to process
 * @param ses	session holding the prepared CDB, IV info and direction
 * @return	per-op context with the filled job descriptor, or NULL when
 *		the required SG entries exceed MAX_SG_ENTRIES or the per-op
 *		context cannot be allocated
 */
static inline struct caam_jr_op_ctx *
build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg, *in_sg;
	int length;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;
	uint8_t reg_segs;

	/* Worst-case SG entries: one output list plus one input list
	 * (the input list carries an extra entry for the IV).
	 */
	if (sym->m_dst) {
		mbuf = sym->m_dst;
		reg_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	} else {
		mbuf = sym->m_src;
		reg_segs = mbuf->nb_segs * 2 + 2;
	}

	if (reg_segs > MAX_SG_ENTRIES) {
		CAAM_JR_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	cdb = ses->cdb;
	/* Byte offset of the shared descriptor within the CDB structure */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

#if CAAM_JR_DBG
	CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
			sym->m_src->data_off, sym->cipher.data.offset,
			sym->cipher.data.length, ses->iv.length);
#endif
	/* output: write to m_dst when provided, else in place over m_src */
	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	sg = &ctx->sg[0];
	length = sym->cipher.data.length;

	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
		+ sym->cipher.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_OUT_PTR(jobdescr,
			(uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0,
			length);
	/*enabling sg bit */
	(jobdescr)->seq_out.command.word  |= 0x01000000;

	/* input: IV entry first, then the payload segments of m_src */
	sg++;
	mbuf = sym->m_src;
	in_sg = sg;

	length = sym->cipher.data.length + ses->iv.length;

	/* IV */
	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
	sg->len = cpu_to_caam32(ses->iv.length);

	/* 1st seg */
	sg++;
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
				+ sym->cipher.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);


	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, in_sg), 0,
				length);
	/*enabling sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	return ctx;
}
978
/**
 * Build a job descriptor for a cipher-only operation on contiguous
 * (single-segment) mbufs; supports out-of-place operation (m_dst set).
 *
 * @param op	symmetric crypto operation to process
 * @param ses	session holding the prepared CDB, IV info and direction
 * @return	per-op context with the filled job descriptor, or NULL on
 *		context allocation failure
 */
static inline struct caam_jr_op_ctx *
build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	cdb = ses->cdb;
	/* Byte offset of the shared descriptor within the CDB structure */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	/* In-place operation when no separate destination is given */
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

#if CAAM_JR_DBG
	CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
			sym->m_src->data_off, sym->cipher.data.offset,
			sym->cipher.data.length, ses->iv.length);
#endif
	/* output */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
			sym->cipher.data.offset,
			sym->cipher.data.length + ses->iv.length);

	/* input: 2-entry SG list — IV first, then the payload */
	sg = &ctx->sg[0];
	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
				sym->cipher.data.length + ses->iv.length);
	/*enabling sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
	sg->len = cpu_to_caam32(ses->iv.length);

	sg = &ctx->sg[1];
	sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
	sg->len = cpu_to_caam32(sym->cipher.data.length);
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	return ctx;
}
1041
/* For decapsulation:
 *     Input:
 * +----+----------------+--------------------------------+-----+
 * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
 * +----+----------------+--------------------------------+-----+
 *     Output:
 * +--------------------------------+
 * | Decrypted & authenticated data |
 * +--------------------------------+
 */
1052
/**
 * Build a job descriptor for a chained cipher+auth operation on
 * scatter-gather (multi-segment) mbufs; supports out-of-place operation.
 *
 * @param op	symmetric crypto operation to process
 * @param ses	session holding the prepared CDB, IV and digest parameters
 * @return	per-op context with the filled job descriptor, or NULL when
 *		the required SG entries exceed MAX_SG_ENTRIES or the per-op
 *		context cannot be allocated
 */
static inline struct caam_jr_op_ctx *
build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint32_t length = 0;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;
	/* Authenticated-only bytes before the ciphered region ... */
	uint16_t auth_hdr_len = sym->cipher.data.offset -
			sym->auth.data.offset;
	/* ... and after it */
	uint16_t auth_tail_len = sym->auth.data.length -
			sym->cipher.data.length - auth_hdr_len;
	/* DPOVRD encoding: tail length in the upper 16 bits, header
	 * length in the lower 16 bits.
	 */
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

	/* Worst-case SG entries: output + input lists, plus IV and
	 * digest entries.
	 */
	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	cdb = ses->cdb;
	/* Byte offset of the shared descriptor within the CDB structure */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* output: write to m_dst when provided, else in place over m_src */
	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	out_sg = &ctx->sg[0];
	/* Encode additionally emits the generated digest */
	if (is_encode(ses))
		length = sym->auth.data.length + ses->digest_length;
	else
		length = sym->auth.data.length;

	sg = &ctx->sg[0];

	/* 1st seg */
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
		+ sym->auth.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}

	if (is_encode(ses)) {
		/* set auth output */
		sg++;
		sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
		sg->len = cpu_to_caam32(ses->digest_length);
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_OUT_PTR(jobdescr,
			   (uint64_t)caam_jr_dma_vtop(out_sg), 0, length);
	/* set sg bit */
	(jobdescr)->seq_out.command.word  |= 0x01000000;

	/* input: IV entry, payload segments, then (decode) reference ICV */
	sg++;
	mbuf = sym->m_src;
	in_sg = sg;
	if (is_encode(ses))
		length = ses->iv.length + sym->auth.data.length;
	else
		length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
	sg->len = cpu_to_caam32(ses->iv.length);

	sg++;
	/* 1st seg */
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
		+ sym->auth.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		sg++;
		/* save the reference digest so the HW can verify it */
		rte_memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0,
				length);
	/* set sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;
	/* Auth_only_len is set as 0 in descriptor and it is
	 * overwritten here in the jd which will update
	 * the DPOVRD reg.
	 */
	if (auth_only_len)
		/* set sg bit */
		(jobdescr)->dpovrd = 0x80000000 | auth_only_len;

	return ctx;
}
1196
/**
 * Build a job descriptor for a chained cipher+auth operation on
 * contiguous (single-segment) mbufs; supports out-of-place operation.
 *
 * @param op	symmetric crypto operation to process
 * @param ses	session holding the prepared CDB, IV and digest parameters
 * @return	per-op context with the filled job descriptor, or NULL on
 *		context allocation failure
 */
static inline struct caam_jr_op_ctx *
build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;
	/* Authenticated-only bytes before the ciphered region ... */
	uint16_t auth_hdr_len = sym->cipher.data.offset -
			sym->auth.data.offset;
	/* ... and after it */
	uint16_t auth_tail_len = sym->auth.data.length -
			sym->cipher.data.length - auth_hdr_len;
	/* DPOVRD encoding: tail length in the upper 16 bits, header
	 * length in the lower 16 bits.
	 */
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	/* In-place operation when no separate destination is given */
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	cdb = ses->cdb;
	/* Byte offset of the shared descriptor within the CDB structure */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* input: IV, then the authenticated region; decode additionally
	 * appends the reference ICV for hardware verification
	 */
	sg = &ctx->sg[0];
	if (is_encode(ses)) {
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
		sg->len = cpu_to_caam32(ses->iv.length);
		length += ses->iv.length;

		sg++;
		sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);
		length += sym->auth.data.length;
		/* last element*/
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	} else {
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
		sg->len = cpu_to_caam32(ses->iv.length);
		length += ses->iv.length;

		sg++;
		sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);
		length += sym->auth.data.length;

		/* save the reference digest so the HW can verify it */
		rte_memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
		length += ses->digest_length;
		/* last element*/
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	}

	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
				length);
	/* set sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	/* output: the input list above uses at most entries 0-2, so the
	 * output list is built at a fixed offset (entry 6) of the same array
	 */
	sg = &ctx->sg[6];

	sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
	sg->len = cpu_to_caam32(sym->cipher.data.length);
	length = sym->cipher.data.length;

	if (is_encode(ses)) {
		/* set auth output */
		sg++;
		sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
		sg->len = cpu_to_caam32(ses->digest_length);
		length += ses->digest_length;
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_OUT_PTR(jobdescr,
			   (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
	/* set sg bit */
	(jobdescr)->seq_out.command.word  |= 0x01000000;

	/* Auth_only_len is set as 0 in descriptor and it is
	 * overwritten here in the jd which will update
	 * the DPOVRD reg.
	 */
	if (auth_only_len)
		/* set sg bit */
		(jobdescr)->dpovrd = 0x80000000 | auth_only_len;

	return ctx;
}
1307
/**
 * Build a job descriptor for a protocol (IPsec) offload operation:
 * the hardware performs the full encap/decap per the session's shared
 * descriptor, so no SG lists or IV/digest entries are set up here.
 *
 * @param op	symmetric crypto operation to process
 * @param ses	session holding the prepared protocol CDB
 * @return	per-op context with the filled job descriptor, or NULL on
 *		context allocation failure
 */
static inline struct caam_jr_op_ctx *
build_proto(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx = NULL;
	phys_addr_t src_start_addr, dst_start_addr;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	struct sec_job_descriptor_t *jobdescr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	/* In-place operation when no separate destination is given */
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	cdb = ses->cdb;
	/* Byte offset of the shared descriptor within the CDB structure */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
			cdb->sh_hdr.hi.field.idlen);

	/* output: give the HW the whole remaining buffer, since encap can
	 * grow the packet
	 */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 0,
			sym->m_src->buf_len - sym->m_src->data_off);
	/* input */
	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)src_start_addr, 0,
			sym->m_src->pkt_len);
	/* the L4 payload is transformed, so the old ptype no longer holds */
	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return ctx;
}
1349
/**
 * Prepare and submit one crypto op to the SEC job ring.
 *
 * Resolves the session, (re)prepares the CDB if the op moved to a new
 * queue pair, dispatches to the matching descriptor builder, converts the
 * descriptor to CAAM byte order if needed, and pushes it on the HW ring.
 *
 * @param op	crypto operation to submit
 * @param qp	queue pair (and its job ring) to submit on
 * @return	0 on success, -1 on unsupported op/session,
 *		-EBUSY when the ring is full
 */
static int
caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
{
	struct sec_job_ring_t *ring = qp->ring;
	struct caam_jr_session *ses;
	struct caam_jr_op_ctx *ctx = NULL;
	struct sec_job_descriptor_t *jobdescr __rte_unused;

	switch (op->sess_type) {
	case RTE_CRYPTO_OP_WITH_SESSION:
		ses = (struct caam_jr_session *)
		get_sym_session_private_data(op->sym->session,
					cryptodev_driver_id);
		break;
	case RTE_CRYPTO_OP_SECURITY_SESSION:
		ses = (struct caam_jr_session *)
			get_sec_session_private_data(
					op->sym->sec_session);
		break;
	default:
		/* sessionless operation is not supported by this PMD */
		CAAM_JR_DP_ERR("sessionless crypto op not supported");
		qp->tx_errs++;
		return -1;
	}

	/* Rebuild the CDB when the session is (re)bound to this qp */
	if (unlikely(!ses->qp || ses->qp != qp)) {
		CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp);
		ses->qp = qp;
		caam_jr_prep_cdb(ses);
	}

	/* Pick the descriptor builder: contiguous vs scatter-gather mbuf.
	 * AEAD is unsupported, so it jumps straight to the error path
	 * (ctx stays NULL there as well).
	 */
	if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
		if (is_auth_cipher(ses))
			ctx = build_cipher_auth(op, ses);
		else if (is_aead(ses))
			goto err1;
		else if (is_auth_only(ses))
			ctx = build_auth_only(op, ses);
		else if (is_cipher_only(ses))
			ctx = build_cipher_only(op, ses);
		else if (is_proto_ipsec(ses))
			ctx = build_proto(op, ses);
	} else {
		if (is_auth_cipher(ses))
			ctx = build_cipher_auth_sg(op, ses);
		else if (is_aead(ses))
			goto err1;
		else if (is_auth_only(ses))
			ctx = build_auth_only_sg(op, ses);
		else if (is_cipher_only(ses))
			ctx = build_cipher_only_sg(op, ses);
	}
err1:
	if (unlikely(!ctx)) {
		qp->tx_errs++;
		CAAM_JR_ERR("not supported sec op");
		return -1;
	}
#if CAAM_JR_DBG
	if (is_decode(ses))
		rte_hexdump(stdout, "DECODE",
			rte_pktmbuf_mtod(op->sym->m_src, void *),
			rte_pktmbuf_data_len(op->sym->m_src));
	else
		rte_hexdump(stdout, "ENCODE",
			rte_pktmbuf_mtod(op->sym->m_src, void *),
			rte_pktmbuf_data_len(op->sym->m_src));

	printf("\n JD before conversion\n");
	for (int i = 0; i < 12; i++)
		printf("\n 0x%08x", ctx->jobdes.desc[i]);
#endif

	CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
		      ring, ring->pidx, ring->cidx);

	/* todo - do we want to retry */
	if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
			 SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
		CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
			      ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
		/* release the context; the caller may retry later */
		caam_jr_op_ending(ctx);
		qp->tx_ring_full++;
		return -EBUSY;
	}

	/* Swap every descriptor word/pointer to CAAM byte order when the
	 * core's byte order differs from the hardware's.
	 */
#if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	jobdescr->deschdr.command.word =
		cpu_to_caam32(jobdescr->deschdr.command.word);
	jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
	jobdescr->seq_out.command.word =
		cpu_to_caam32(jobdescr->seq_out.command.word);
	jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
	jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
	jobdescr->seq_in.command.word =
		cpu_to_caam32(jobdescr->seq_in.command.word);
	jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
	jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
	jobdescr->load_dpovrd.command.word =
		cpu_to_caam32(jobdescr->load_dpovrd.command.word);
	jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
#endif

	/* Set ptr in input ring to current descriptor  */
	sec_write_addr(&ring->input_ring[ring->pidx],
			(phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
	/* ensure the descriptor is visible before notifying the HW */
	rte_smp_wmb();

	/* Notify HW that a new job is enqueued */
	hw_enqueue_desc_on_job_ring(ring);

	/* increment the producer index for the current job ring */
	ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);

	return 0;
}
1468
1469 static uint16_t
1470 caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1471                        uint16_t nb_ops)
1472 {
1473         /* Function to transmit the frames to given device and queuepair */
1474         uint32_t loop;
1475         int32_t ret;
1476         struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
1477         uint16_t num_tx = 0;
1478         /*Prepare each packet which is to be sent*/
1479         for (loop = 0; loop < nb_ops; loop++) {
1480                 ret = caam_jr_enqueue_op(ops[loop], jr_qp);
1481                 if (!ret)
1482                         num_tx++;
1483         }
1484
1485         jr_qp->tx_pkts += num_tx;
1486
1487         return num_tx;
1488 }
1489
1490 /* Release queue pair */
1491 static int
1492 caam_jr_queue_pair_release(struct rte_cryptodev *dev,
1493                            uint16_t qp_id)
1494 {
1495         struct sec_job_ring_t *internals;
1496         struct caam_jr_qp *qp = NULL;
1497
1498         PMD_INIT_FUNC_TRACE();
1499         CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);
1500
1501         internals = dev->data->dev_private;
1502         if (qp_id >= internals->max_nb_queue_pairs) {
1503                 CAAM_JR_ERR("Max supported qpid %d",
1504                              internals->max_nb_queue_pairs);
1505                 return -EINVAL;
1506         }
1507
1508         qp = &internals->qps[qp_id];
1509         qp->ring = NULL;
1510         dev->data->queue_pairs[qp_id] = NULL;
1511
1512         return 0;
1513 }
1514
1515 /* Setup a queue pair */
1516 static int
1517 caam_jr_queue_pair_setup(
1518                 struct rte_cryptodev *dev, uint16_t qp_id,
1519                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1520                 __rte_unused int socket_id)
1521 {
1522         struct sec_job_ring_t *internals;
1523         struct caam_jr_qp *qp = NULL;
1524
1525         PMD_INIT_FUNC_TRACE();
1526         CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1527
1528         internals = dev->data->dev_private;
1529         if (qp_id >= internals->max_nb_queue_pairs) {
1530                 CAAM_JR_ERR("Max supported qpid %d",
1531                              internals->max_nb_queue_pairs);
1532                 return -EINVAL;
1533         }
1534
1535         qp = &internals->qps[qp_id];
1536         qp->ring = internals;
1537         dev->data->queue_pairs[qp_id] = qp;
1538
1539         return 0;
1540 }
1541
/* Return the number of allocated queue pairs */
static uint32_t
caam_jr_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	/* report the count configured on the device data */
	return dev->data->nb_queue_pairs;
}
1550
/* Returns the size of the caam_jr session structure
 * (previous comment mentioned "aesni gcm" — a copy-paste leftover)
 */
static unsigned int
caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(struct caam_jr_session);
}
1559
1560 static int
1561 caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
1562                     struct rte_crypto_sym_xform *xform,
1563                     struct caam_jr_session *session)
1564 {
1565         session->cipher_alg = xform->cipher.algo;
1566         session->iv.length = xform->cipher.iv.length;
1567         session->iv.offset = xform->cipher.iv.offset;
1568         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1569                                                RTE_CACHE_LINE_SIZE);
1570         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1571                 CAAM_JR_ERR("No Memory for cipher key\n");
1572                 return -ENOMEM;
1573         }
1574         session->cipher_key.length = xform->cipher.key.length;
1575
1576         memcpy(session->cipher_key.data, xform->cipher.key.data,
1577                xform->cipher.key.length);
1578         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1579                         DIR_ENC : DIR_DEC;
1580
1581         return 0;
1582 }
1583
1584 static int
1585 caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
1586                   struct rte_crypto_sym_xform *xform,
1587                   struct caam_jr_session *session)
1588 {
1589         session->auth_alg = xform->auth.algo;
1590         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1591                                              RTE_CACHE_LINE_SIZE);
1592         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1593                 CAAM_JR_ERR("No Memory for auth key\n");
1594                 return -ENOMEM;
1595         }
1596         session->auth_key.length = xform->auth.key.length;
1597         session->digest_length = xform->auth.digest_length;
1598
1599         memcpy(session->auth_key.data, xform->auth.key.data,
1600                xform->auth.key.length);
1601         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1602                         DIR_ENC : DIR_DEC;
1603
1604         return 0;
1605 }
1606
1607 static int
1608 caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
1609                   struct rte_crypto_sym_xform *xform,
1610                   struct caam_jr_session *session)
1611 {
1612         session->aead_alg = xform->aead.algo;
1613         session->iv.length = xform->aead.iv.length;
1614         session->iv.offset = xform->aead.iv.offset;
1615         session->auth_only_len = xform->aead.aad_length;
1616         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1617                                              RTE_CACHE_LINE_SIZE);
1618         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1619                 CAAM_JR_ERR("No Memory for aead key\n");
1620                 return -ENOMEM;
1621         }
1622         session->aead_key.length = xform->aead.key.length;
1623         session->digest_length = xform->aead.digest_length;
1624
1625         memcpy(session->aead_key.data, xform->aead.key.data,
1626                xform->aead.key.length);
1627         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1628                         DIR_ENC : DIR_DEC;
1629
1630         return 0;
1631 }
1632
1633 static int
1634 caam_jr_set_session_parameters(struct rte_cryptodev *dev,
1635                                struct rte_crypto_sym_xform *xform, void *sess)
1636 {
1637         struct sec_job_ring_t *internals = dev->data->dev_private;
1638         struct caam_jr_session *session = sess;
1639
1640         PMD_INIT_FUNC_TRACE();
1641
1642         if (unlikely(sess == NULL)) {
1643                 CAAM_JR_ERR("invalid session struct");
1644                 return -EINVAL;
1645         }
1646
1647         /* Default IV length = 0 */
1648         session->iv.length = 0;
1649
1650         /* Cipher Only */
1651         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1652                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1653                 caam_jr_cipher_init(dev, xform, session);
1654
1655         /* Authentication Only */
1656         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1657                    xform->next == NULL) {
1658                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1659                 caam_jr_auth_init(dev, xform, session);
1660
1661         /* Cipher then Authenticate */
1662         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1663                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1664                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1665                         caam_jr_cipher_init(dev, xform, session);
1666                         caam_jr_auth_init(dev, xform->next, session);
1667                 } else {
1668                         CAAM_JR_ERR("Not supported: Auth then Cipher");
1669                         goto err1;
1670                 }
1671
1672         /* Authenticate then Cipher */
1673         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1674                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1675                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1676                         caam_jr_auth_init(dev, xform, session);
1677                         caam_jr_cipher_init(dev, xform->next, session);
1678                 } else {
1679                         CAAM_JR_ERR("Not supported: Auth then Cipher");
1680                         goto err1;
1681                 }
1682
1683         /* AEAD operation for AES-GCM kind of Algorithms */
1684         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1685                    xform->next == NULL) {
1686                 caam_jr_aead_init(dev, xform, session);
1687
1688         } else {
1689                 CAAM_JR_ERR("Invalid crypto type");
1690                 return -EINVAL;
1691         }
1692         session->ctx_pool = internals->ctx_pool;
1693
1694         return 0;
1695
1696 err1:
1697         rte_free(session->cipher_key.data);
1698         rte_free(session->auth_key.data);
1699         memset(session, 0, sizeof(struct caam_jr_session));
1700
1701         return -EINVAL;
1702 }
1703
1704 static int
1705 caam_jr_sym_session_configure(struct rte_cryptodev *dev,
1706                               struct rte_crypto_sym_xform *xform,
1707                               struct rte_cryptodev_sym_session *sess,
1708                               struct rte_mempool *mempool)
1709 {
1710         void *sess_private_data;
1711         int ret;
1712
1713         PMD_INIT_FUNC_TRACE();
1714
1715         if (rte_mempool_get(mempool, &sess_private_data)) {
1716                 CAAM_JR_ERR("Couldn't get object from session mempool");
1717                 return -ENOMEM;
1718         }
1719
1720         memset(sess_private_data, 0, sizeof(struct caam_jr_session));
1721         ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
1722         if (ret != 0) {
1723                 CAAM_JR_ERR("failed to configure session parameters");
1724                 /* Return session to mempool */
1725                 rte_mempool_put(mempool, sess_private_data);
1726                 return ret;
1727         }
1728
1729         set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
1730
1731         return 0;
1732 }
1733
1734 /* Clear the memory of session so it doesn't leave key material behind */
1735 static void
1736 caam_jr_sym_session_clear(struct rte_cryptodev *dev,
1737                 struct rte_cryptodev_sym_session *sess)
1738 {
1739         uint8_t index = dev->driver_id;
1740         void *sess_priv = get_sym_session_private_data(sess, index);
1741         struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1742
1743         PMD_INIT_FUNC_TRACE();
1744
1745         if (sess_priv) {
1746                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1747
1748                 rte_free(s->cipher_key.data);
1749                 rte_free(s->auth_key.data);
1750                 memset(s, 0, sizeof(struct caam_jr_session));
1751                 set_sym_session_private_data(sess, index, NULL);
1752                 rte_mempool_put(sess_mp, sess_priv);
1753         }
1754 }
1755
1756 static int
1757 caam_jr_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
1758                           struct rte_security_session_conf *conf,
1759                           void *sess)
1760 {
1761         struct sec_job_ring_t *internals = dev->data->dev_private;
1762         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1763         struct rte_crypto_auth_xform *auth_xform;
1764         struct rte_crypto_cipher_xform *cipher_xform;
1765         struct caam_jr_session *session = (struct caam_jr_session *)sess;
1766
1767         PMD_INIT_FUNC_TRACE();
1768
1769         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1770                 cipher_xform = &conf->crypto_xform->cipher;
1771                 auth_xform = &conf->crypto_xform->next->auth;
1772         } else {
1773                 auth_xform = &conf->crypto_xform->auth;
1774                 cipher_xform = &conf->crypto_xform->next->cipher;
1775         }
1776         session->proto_alg = conf->protocol;
1777         session->cipher_key.data = rte_zmalloc(NULL,
1778                                                cipher_xform->key.length,
1779                                                RTE_CACHE_LINE_SIZE);
1780         if (session->cipher_key.data == NULL &&
1781                         cipher_xform->key.length > 0) {
1782                 CAAM_JR_ERR("No Memory for cipher key\n");
1783                 return -ENOMEM;
1784         }
1785
1786         session->cipher_key.length = cipher_xform->key.length;
1787         session->auth_key.data = rte_zmalloc(NULL,
1788                                         auth_xform->key.length,
1789                                         RTE_CACHE_LINE_SIZE);
1790         if (session->auth_key.data == NULL &&
1791                         auth_xform->key.length > 0) {
1792                 CAAM_JR_ERR("No Memory for auth key\n");
1793                 rte_free(session->cipher_key.data);
1794                 return -ENOMEM;
1795         }
1796         session->auth_key.length = auth_xform->key.length;
1797         memcpy(session->cipher_key.data, cipher_xform->key.data,
1798                         cipher_xform->key.length);
1799         memcpy(session->auth_key.data, auth_xform->key.data,
1800                         auth_xform->key.length);
1801
1802         switch (auth_xform->algo) {
1803         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1804                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1805                 break;
1806         case RTE_CRYPTO_AUTH_MD5_HMAC:
1807                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1808                 break;
1809         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1810                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1811                 break;
1812         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1813                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1814                 break;
1815         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1816                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1817                 break;
1818         case RTE_CRYPTO_AUTH_AES_CMAC:
1819                 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1820                 break;
1821         case RTE_CRYPTO_AUTH_NULL:
1822                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1823                 break;
1824         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1825         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1826         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1827         case RTE_CRYPTO_AUTH_SHA1:
1828         case RTE_CRYPTO_AUTH_SHA256:
1829         case RTE_CRYPTO_AUTH_SHA512:
1830         case RTE_CRYPTO_AUTH_SHA224:
1831         case RTE_CRYPTO_AUTH_SHA384:
1832         case RTE_CRYPTO_AUTH_MD5:
1833         case RTE_CRYPTO_AUTH_AES_GMAC:
1834         case RTE_CRYPTO_AUTH_KASUMI_F9:
1835         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1836         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1837                 CAAM_JR_ERR("Crypto: Unsupported auth alg %u\n",
1838                         auth_xform->algo);
1839                 goto out;
1840         default:
1841                 CAAM_JR_ERR("Crypto: Undefined Auth specified %u\n",
1842                         auth_xform->algo);
1843                 goto out;
1844         }
1845
1846         switch (cipher_xform->algo) {
1847         case RTE_CRYPTO_CIPHER_AES_CBC:
1848                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1849                 break;
1850         case RTE_CRYPTO_CIPHER_3DES_CBC:
1851                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1852                 break;
1853         case RTE_CRYPTO_CIPHER_AES_CTR:
1854                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1855                 break;
1856         case RTE_CRYPTO_CIPHER_NULL:
1857         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1858         case RTE_CRYPTO_CIPHER_3DES_ECB:
1859         case RTE_CRYPTO_CIPHER_AES_ECB:
1860         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1861                 CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u\n",
1862                         cipher_xform->algo);
1863                 goto out;
1864         default:
1865                 CAAM_JR_ERR("Crypto: Undefined Cipher specified %u\n",
1866                         cipher_xform->algo);
1867                 goto out;
1868         }
1869
1870         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1871                 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
1872                                 sizeof(session->ip4_hdr));
1873                 session->ip4_hdr.ip_v = IPVERSION;
1874                 session->ip4_hdr.ip_hl = 5;
1875                 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
1876                                                 sizeof(session->ip4_hdr));
1877                 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
1878                 session->ip4_hdr.ip_id = 0;
1879                 session->ip4_hdr.ip_off = 0;
1880                 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
1881                 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
1882                                 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
1883                                 : IPPROTO_AH;
1884                 session->ip4_hdr.ip_sum = 0;
1885                 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
1886                 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
1887                 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
1888                                                 (void *)&session->ip4_hdr,
1889                                                 sizeof(struct ip));
1890
1891                 session->encap_pdb.options =
1892                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
1893                         PDBOPTS_ESP_OIHI_PDB_INL |
1894                         PDBOPTS_ESP_IVSRC |
1895                         PDBHMO_ESP_ENCAP_DTTL;
1896                 if (ipsec_xform->options.esn)
1897                         session->encap_pdb.options |= PDBOPTS_ESP_ESN;
1898                 session->encap_pdb.spi = ipsec_xform->spi;
1899                 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
1900
1901                 session->dir = DIR_ENC;
1902         } else if (ipsec_xform->direction ==
1903                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1904                 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
1905                 session->decap_pdb.options = sizeof(struct ip) << 16;
1906                 if (ipsec_xform->options.esn)
1907                         session->decap_pdb.options |= PDBOPTS_ESP_ESN;
1908                 session->dir = DIR_DEC;
1909         } else
1910                 goto out;
1911         session->ctx_pool = internals->ctx_pool;
1912
1913         return 0;
1914 out:
1915         rte_free(session->auth_key.data);
1916         rte_free(session->cipher_key.data);
1917         memset(session, 0, sizeof(struct caam_jr_session));
1918         return -1;
1919 }
1920
1921 static int
1922 caam_jr_security_session_create(void *dev,
1923                                 struct rte_security_session_conf *conf,
1924                                 struct rte_security_session *sess,
1925                                 struct rte_mempool *mempool)
1926 {
1927         void *sess_private_data;
1928         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
1929         int ret;
1930
1931         if (rte_mempool_get(mempool, &sess_private_data)) {
1932                 CAAM_JR_ERR("Couldn't get object from session mempool");
1933                 return -ENOMEM;
1934         }
1935
1936         switch (conf->protocol) {
1937         case RTE_SECURITY_PROTOCOL_IPSEC:
1938                 ret = caam_jr_set_ipsec_session(cdev, conf,
1939                                 sess_private_data);
1940                 break;
1941         case RTE_SECURITY_PROTOCOL_MACSEC:
1942                 return -ENOTSUP;
1943         default:
1944                 return -EINVAL;
1945         }
1946         if (ret != 0) {
1947                 CAAM_JR_ERR("failed to configure session parameters");
1948                 /* Return session to mempool */
1949                 rte_mempool_put(mempool, sess_private_data);
1950                 return ret;
1951         }
1952
1953         set_sec_session_private_data(sess, sess_private_data);
1954
1955         return ret;
1956 }
1957
1958 /* Clear the memory of session so it doesn't leave key material behind */
1959 static int
1960 caam_jr_security_session_destroy(void *dev __rte_unused,
1961                                  struct rte_security_session *sess)
1962 {
1963         PMD_INIT_FUNC_TRACE();
1964         void *sess_priv = get_sec_session_private_data(sess);
1965
1966         struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1967
1968         if (sess_priv) {
1969                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1970
1971                 rte_free(s->cipher_key.data);
1972                 rte_free(s->auth_key.data);
1973                 memset(sess, 0, sizeof(struct caam_jr_session));
1974                 set_sec_session_private_data(sess, NULL);
1975                 rte_mempool_put(sess_mp, sess_priv);
1976         }
1977         return 0;
1978 }
1979
1980
1981 static int
1982 caam_jr_dev_configure(struct rte_cryptodev *dev,
1983                        struct rte_cryptodev_config *config __rte_unused)
1984 {
1985         char str[20];
1986         struct sec_job_ring_t *internals;
1987
1988         PMD_INIT_FUNC_TRACE();
1989
1990         internals = dev->data->dev_private;
1991         snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
1992         if (!internals->ctx_pool) {
1993                 internals->ctx_pool = rte_mempool_create((const char *)str,
1994                                                 CTX_POOL_NUM_BUFS,
1995                                                 sizeof(struct caam_jr_op_ctx),
1996                                                 CTX_POOL_CACHE_SIZE, 0,
1997                                                 NULL, NULL, NULL, NULL,
1998                                                 SOCKET_ID_ANY, 0);
1999                 if (!internals->ctx_pool) {
2000                         CAAM_JR_ERR("%s create failed\n", str);
2001                         return -ENOMEM;
2002                 }
2003         } else
2004                 CAAM_JR_INFO("mempool already created for dev_id : %d",
2005                                 dev->data->dev_id);
2006
2007         return 0;
2008 }
2009
/* Device start op: nothing to do, the job ring was started at init. */
static int
caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}
2016
/* Device stop op: no-op; teardown happens in close/uninit. */
static void
caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
2022
2023 static int
2024 caam_jr_dev_close(struct rte_cryptodev *dev)
2025 {
2026         struct sec_job_ring_t *internals;
2027
2028         PMD_INIT_FUNC_TRACE();
2029
2030         if (dev == NULL)
2031                 return -ENOMEM;
2032
2033         internals = dev->data->dev_private;
2034         rte_mempool_free(internals->ctx_pool);
2035         internals->ctx_pool = NULL;
2036
2037         return 0;
2038 }
2039
2040 static void
2041 caam_jr_dev_infos_get(struct rte_cryptodev *dev,
2042                        struct rte_cryptodev_info *info)
2043 {
2044         struct sec_job_ring_t *internals = dev->data->dev_private;
2045
2046         PMD_INIT_FUNC_TRACE();
2047         if (info != NULL) {
2048                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2049                 info->feature_flags = dev->feature_flags;
2050                 info->capabilities = caam_jr_get_cryptodev_capabilities();
2051                 info->sym.max_nb_sessions = internals->max_nb_sessions;
2052                 info->driver_id = cryptodev_driver_id;
2053         }
2054 }
2055
/* Cryptodev operations table wired into dev->dev_ops at device init. */
static struct rte_cryptodev_ops caam_jr_ops = {
	.dev_configure        = caam_jr_dev_configure,
	.dev_start            = caam_jr_dev_start,
	.dev_stop             = caam_jr_dev_stop,
	.dev_close            = caam_jr_dev_close,
	.dev_infos_get        = caam_jr_dev_infos_get,
	.stats_get            = caam_jr_stats_get,
	.stats_reset          = caam_jr_stats_reset,
	.queue_pair_setup     = caam_jr_queue_pair_setup,
	.queue_pair_release   = caam_jr_queue_pair_release,
	.queue_pair_count     = caam_jr_queue_pair_count,
	.sym_session_get_size = caam_jr_sym_session_get_size,
	.sym_session_configure = caam_jr_sym_session_configure,
	.sym_session_clear    = caam_jr_sym_session_clear
};
2071
/* rte_security operations: only IPsec session create/destroy and
 * capability query are implemented; the rest are intentionally NULL.
 */
static struct rte_security_ops caam_jr_security_ops = {
	.session_create = caam_jr_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = caam_jr_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = caam_jr_get_security_capabilities
};
2080
/* @brief Flush job rings of any processed descs.
 * The processed descs are silently dropped,
 * WITHOUT being notified to UA.
 */
static void
close_job_ring(struct sec_job_ring_t *job_ring)
{
	/* irq_fd != 0 means the g_job_rings[] slot is in use */
	if (job_ring->irq_fd) {
		/* Producer index is frozen. If consumer index is not equal
		 * with producer index, then we have descs to flush.
		 */
		while (job_ring->pidx != job_ring->cidx)
			hw_flush_job_ring(job_ring, false, NULL);

		/* free the uio job ring */
		free_job_ring(job_ring->irq_fd);
		/* irq_fd = 0 marks the slot free for reuse by
		 * init_job_ring()
		 */
		job_ring->irq_fd = 0;
		caam_jr_dma_free(job_ring->input_ring);
		caam_jr_dma_free(job_ring->output_ring);
		g_job_rings_no--;
	}
}
2103
/** @brief Release the software and hardware resources tied to a job ring.
 * @param [in] job_ring The job ring
 *
 * @retval  0 for success
 * @retval  -1 for error
 */
static int
shutdown_job_ring(struct sec_job_ring_t *job_ring)
{
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	ASSERT(job_ring != NULL);
	ret = hw_shutdown_job_ring(job_ring);
	/* SEC_ASSERT returns 'ret' from this function on failure */
	SEC_ASSERT(ret == 0, ret,
		"Failed to shutdown hardware job ring %p",
		job_ring);

	/* Undo coalescing/IRQ configuration applied by init_job_ring() */
	if (job_ring->coalescing_en)
		hw_job_ring_disable_coalescing(job_ring);

	if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
		ret = caam_jr_disable_irqs(job_ring->irq_fd);
		SEC_ASSERT(ret == 0, ret,
		"Failed to disable irqs for job ring %p",
		job_ring);
	}

	return ret;
}
2134
2135 /*
2136  * @brief Release the resources used by the SEC user space driver.
2137  *
2138  * Reset and release SEC's job rings indicated by the User Application at
2139  * init_job_ring() and free any memory allocated internally.
2140  * Call once during application tear down.
2141  *
2142  * @note In case there are any descriptors in-flight (descriptors received by
2143  * SEC driver for processing and for which no response was yet provided to UA),
2144  * the descriptors are discarded without any notifications to User Application.
2145  *
2146  * @retval ::0                  is returned for a successful execution
2147  * @retval ::-1         is returned if SEC driver release is in progress
2148  */
2149 static int
2150 caam_jr_dev_uninit(struct rte_cryptodev *dev)
2151 {
2152         struct sec_job_ring_t *internals;
2153
2154         PMD_INIT_FUNC_TRACE();
2155         if (dev == NULL)
2156                 return -ENODEV;
2157
2158         internals = dev->data->dev_private;
2159         rte_free(dev->security_ctx);
2160
2161         /* If any descriptors in flight , poll and wait
2162          * until all descriptors are received and silently discarded.
2163          */
2164         if (internals) {
2165                 shutdown_job_ring(internals);
2166                 close_job_ring(internals);
2167                 rte_mempool_free(internals->ctx_pool);
2168         }
2169
2170         CAAM_JR_INFO("Closing crypto device %s", dev->data->name);
2171
2172         /* last caam jr instance) */
2173         if (g_job_rings_no == 0)
2174                 g_driver_state = SEC_DRIVER_STATE_IDLE;
2175
2176         return SEC_SUCCESS;
2177 }
2178
2179 /* @brief Initialize the software and hardware resources tied to a job ring.
2180  * @param [in] jr_mode;         Model to be used by SEC Driver to receive
2181  *                              notifications from SEC.  Can be either
2182  *                              of the three: #SEC_NOTIFICATION_TYPE_NAPI
2183  *                              #SEC_NOTIFICATION_TYPE_IRQ or
2184  *                              #SEC_NOTIFICATION_TYPE_POLL
2185  * @param [in] NAPI_mode        The NAPI work mode to configure a job ring at
2186  *                              startup. Used only when #SEC_NOTIFICATION_TYPE
2187  *                              is set to #SEC_NOTIFICATION_TYPE_NAPI.
2188  * @param [in] irq_coalescing_timer This value determines the maximum
2189  *                                      amount of time after processing a
2190  *                                      descriptor before raising an interrupt.
2191  * @param [in] irq_coalescing_count This value determines how many
2192  *                                      descriptors are completed before
2193  *                                      raising an interrupt.
2194  * @param [in] reg_base_addr,   The job ring base address register
2195  * @param [in] irq_id           The job ring interrupt identification number.
2196  * @retval  job_ring_handle for successful job ring configuration
2197  * @retval  NULL on error
2198  *
2199  */
2200 static void *
2201 init_job_ring(void *reg_base_addr, uint32_t irq_id)
2202 {
2203         struct sec_job_ring_t *job_ring = NULL;
2204         int i, ret = 0;
2205         int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
2206         int napi_mode = 0;
2207         int irq_coalescing_timer = 0;
2208         int irq_coalescing_count = 0;
2209
2210         for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
2211                 if (g_job_rings[i].irq_fd == 0) {
2212                         job_ring = &g_job_rings[i];
2213                         g_job_rings_no++;
2214                         break;
2215                 }
2216         }
2217         if (job_ring == NULL) {
2218                 CAAM_JR_ERR("No free job ring\n");
2219                 return NULL;
2220         }
2221
2222         job_ring->register_base_addr = reg_base_addr;
2223         job_ring->jr_mode = jr_mode;
2224         job_ring->napi_mode = 0;
2225         job_ring->irq_fd = irq_id;
2226
2227         /* Allocate mem for input and output ring */
2228
2229         /* Allocate memory for input ring */
2230         job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2231                                 SEC_DMA_MEM_INPUT_RING_SIZE);
2232         memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
2233
2234         /* Allocate memory for output ring */
2235         job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2236                                 SEC_DMA_MEM_OUTPUT_RING_SIZE);
2237         memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
2238
2239         /* Reset job ring in SEC hw and configure job ring registers */
2240         ret = hw_reset_job_ring(job_ring);
2241         if (ret != 0) {
2242                 CAAM_JR_ERR("Failed to reset hardware job ring");
2243                 goto cleanup;
2244         }
2245
2246         if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
2247         /* When SEC US driver works in NAPI mode, the UA can select
2248          * if the driver starts with IRQs on or off.
2249          */
2250                 if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
2251                         CAAM_JR_INFO("Enabling DONE IRQ generationon job ring - %p",
2252                                 job_ring);
2253                         ret = caam_jr_enable_irqs(job_ring->irq_fd);
2254                         if (ret != 0) {
2255                                 CAAM_JR_ERR("Failed to enable irqs for job ring");
2256                                 goto cleanup;
2257                         }
2258                 }
2259         } else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
2260         /* When SEC US driver works in pure interrupt mode,
2261          * IRQ's are always enabled.
2262          */
2263                 CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2264                          job_ring);
2265                 ret = caam_jr_enable_irqs(job_ring->irq_fd);
2266                 if (ret != 0) {
2267                         CAAM_JR_ERR("Failed to enable irqs for job ring");
2268                         goto cleanup;
2269                 }
2270         }
2271         if (irq_coalescing_timer || irq_coalescing_count) {
2272                 hw_job_ring_set_coalescing_param(job_ring,
2273                          irq_coalescing_timer,
2274                          irq_coalescing_count);
2275
2276                 hw_job_ring_enable_coalescing(job_ring);
2277                 job_ring->coalescing_en = 1;
2278         }
2279
2280         job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
2281         job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
2282         job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
2283
2284         return job_ring;
2285 cleanup:
2286         caam_jr_dma_free(job_ring->output_ring);
2287         caam_jr_dma_free(job_ring->input_ring);
2288         return NULL;
2289 }
2290
2291
2292 static int
2293 caam_jr_dev_init(const char *name,
2294                  struct rte_vdev_device *vdev,
2295                  struct rte_cryptodev_pmd_init_params *init_params)
2296 {
2297         struct rte_cryptodev *dev;
2298         struct rte_security_ctx *security_instance;
2299         struct uio_job_ring *job_ring;
2300         char str[RTE_CRYPTODEV_NAME_MAX_LEN];
2301
2302         PMD_INIT_FUNC_TRACE();
2303
2304         /* Validate driver state */
2305         if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
2306                 g_job_rings_max = sec_configure();
2307                 if (!g_job_rings_max) {
2308                         CAAM_JR_ERR("No job ring detected on UIO !!!!");
2309                         return -1;
2310                 }
2311                 /* Update driver state */
2312                 g_driver_state = SEC_DRIVER_STATE_STARTED;
2313         }
2314
2315         if (g_job_rings_no >= g_job_rings_max) {
2316                 CAAM_JR_ERR("No more job rings available max=%d!!!!",
2317                                 g_job_rings_max);
2318                 return -1;
2319         }
2320
2321         job_ring = config_job_ring();
2322         if (job_ring == NULL) {
2323                 CAAM_JR_ERR("failed to create job ring");
2324                 goto init_error;
2325         }
2326
2327         snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
2328
2329         dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
2330         if (dev == NULL) {
2331                 CAAM_JR_ERR("failed to create cryptodev vdev");
2332                 goto cleanup;
2333         }
2334         /*TODO free it during teardown*/
2335         dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
2336                                                 job_ring->uio_fd);
2337
2338         if (!dev->data->dev_private) {
2339                 CAAM_JR_ERR("Ring memory allocation failed\n");
2340                 goto cleanup2;
2341         }
2342
2343         dev->driver_id = cryptodev_driver_id;
2344         dev->dev_ops = &caam_jr_ops;
2345
2346         /* register rx/tx burst functions for data path */
2347         dev->dequeue_burst = caam_jr_dequeue_burst;
2348         dev->enqueue_burst = caam_jr_enqueue_burst;
2349         dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2350                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
2351                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2352                         RTE_CRYPTODEV_FF_SECURITY |
2353                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2354                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2355                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2356                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2357                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2358
2359         /* For secondary processes, we don't initialise any further as primary
2360          * has already done this work. Only check we don't need a different
2361          * RX function
2362          */
2363         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2364                 CAAM_JR_WARN("Device already init by primary process");
2365                 return 0;
2366         }
2367
2368         /*TODO free it during teardown*/
2369         security_instance = rte_malloc("caam_jr",
2370                                 sizeof(struct rte_security_ctx), 0);
2371         if (security_instance == NULL) {
2372                 CAAM_JR_ERR("memory allocation failed\n");
2373                 //todo error handling.
2374                 goto cleanup2;
2375         }
2376
2377         security_instance->device = (void *)dev;
2378         security_instance->ops = &caam_jr_security_ops;
2379         security_instance->sess_cnt = 0;
2380         dev->security_ctx = security_instance;
2381
2382         RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name);
2383
2384         return 0;
2385
2386 cleanup2:
2387         caam_jr_dev_uninit(dev);
2388         rte_cryptodev_pmd_release_device(dev);
2389 cleanup:
2390         free_job_ring(job_ring->uio_fd);
2391 init_error:
2392         CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
2393                         init_params->name);
2394
2395         return -ENXIO;
2396 }
2397
2398 /** Initialise CAAM JR crypto device */
2399 static int
2400 cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
2401 {
2402         struct rte_cryptodev_pmd_init_params init_params = {
2403                 "",
2404                 sizeof(struct sec_job_ring_t),
2405                 rte_socket_id(),
2406                 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
2407         };
2408         const char *name;
2409         const char *input_args;
2410
2411         name = rte_vdev_device_name(vdev);
2412         if (name == NULL)
2413                 return -EINVAL;
2414
2415         input_args = rte_vdev_device_args(vdev);
2416         rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
2417
2418         /* if sec device version is not configured */
2419         if (!rta_get_sec_era()) {
2420                 const struct device_node *caam_node;
2421
2422                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2423                         const uint32_t *prop = of_get_property(caam_node,
2424                                         "fsl,sec-era",
2425                                         NULL);
2426                         if (prop) {
2427                                 rta_set_sec_era(
2428                                         INTL_SEC_ERA(cpu_to_caam32(*prop)));
2429                                 break;
2430                         }
2431                 }
2432         }
2433 #ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
2434         if (rta_get_sec_era() > RTA_SEC_ERA_8) {
2435                 RTE_LOG(ERR, PMD,
2436                 "CAAM is compiled in BE mode for device with sec era > 8???\n");
2437                 return -EINVAL;
2438         }
2439 #endif
2440
2441         return caam_jr_dev_init(name, vdev, &init_params);
2442 }
2443
2444 /** Uninitialise CAAM JR crypto device */
2445 static int
2446 cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
2447 {
2448         struct rte_cryptodev *cryptodev;
2449         const char *name;
2450
2451         name = rte_vdev_device_name(vdev);
2452         if (name == NULL)
2453                 return -EINVAL;
2454
2455         cryptodev = rte_cryptodev_pmd_get_named_dev(name);
2456         if (cryptodev == NULL)
2457                 return -ENODEV;
2458
2459         caam_jr_dev_uninit(cryptodev);
2460
2461         return rte_cryptodev_pmd_destroy(cryptodev);
2462 }
2463
/* Virtual-device driver entry points for the CAAM JR PMD. */
static struct rte_vdev_driver cryptodev_caam_jr_drv = {
	.probe = cryptodev_caam_jr_probe,
	.remove = cryptodev_caam_jr_remove
};

/* Crypto driver handle filled in by RTE_PMD_REGISTER_CRYPTO_DRIVER below. */
static struct cryptodev_driver caam_jr_crypto_drv;

/* Register the vdev driver, its accepted devargs, and allocate the
 * per-driver id stored in cryptodev_driver_id.
 */
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
	"max_nb_queue_pairs=<int>"
	"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
		cryptodev_driver_id);
2477
2478 RTE_INIT(caam_jr_init_log)
2479 {
2480         caam_jr_logtype = rte_log_register("pmd.crypto.caam");
2481         if (caam_jr_logtype >= 0)
2482                 rte_log_set_level(caam_jr_logtype, RTE_LOG_NOTICE);
2483 }