crypto/caam_jr: reduce function traces in critical path
[dpdk.git] / drivers / crypto / caam_jr / caam_jr.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2017-2019 NXP
3  */
4
5 #include <fcntl.h>
6 #include <unistd.h>
7 #include <sched.h>
8 #include <net/if.h>
9
10 #include <rte_byteorder.h>
11 #include <rte_common.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_crypto.h>
14 #include <rte_cryptodev.h>
15 #include <rte_bus_vdev.h>
16 #include <rte_malloc.h>
17 #include <rte_security_driver.h>
18 #include <rte_hexdump.h>
19
20 #include <caam_jr_capabilities.h>
21 #include <caam_jr_config.h>
22 #include <caam_jr_hw_specific.h>
23 #include <caam_jr_pvt.h>
24 #include <caam_jr_desc.h>
25 #include <caam_jr_log.h>
26
27 /* RTA header files */
28 #include <hw/desc/common.h>
29 #include <hw/desc/algo.h>
30 #include <of.h>
31 #ifdef RTE_LIBRTE_PMD_CAAM_JR_DEBUG
32 #define CAAM_JR_DBG     1
33 #else
34 #define CAAM_JR_DBG     0
35 #endif
36 #define CRYPTODEV_NAME_CAAM_JR_PMD      crypto_caam_jr
37 static uint8_t cryptodev_driver_id;
38 int caam_jr_logtype;
39
40 enum rta_sec_era rta_sec_era;
41
42 /* Lists the states possible for the SEC user space driver. */
43 enum sec_driver_state_e {
44         SEC_DRIVER_STATE_IDLE,          /* Driver not initialized */
45         SEC_DRIVER_STATE_STARTED,       /* Driver initialized and can be used*/
46         SEC_DRIVER_STATE_RELEASE,       /* Driver release is in progress */
47 };
48
49 /* Job rings used for communication with SEC HW */
50 static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
51
52 /* The current state of SEC user space driver */
53 static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;
54
55 /* The number of job rings used by SEC user space driver */
56 static int g_job_rings_no;
57 static int g_job_rings_max;
58
59 struct sec_outring_entry {
60         phys_addr_t desc;       /* Pointer to completed descriptor */
61         uint32_t status;        /* Status for completed descriptor */
62 } __rte_packed;
63
64 /* virtual address conversin when mempool support is available for ctx */
65 static inline phys_addr_t
66 caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
67 {
68         return (size_t)vaddr - ctx->vtop_offset;
69 }
70
71 static inline void
72 caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
73 {
74         /* report op status to sym->op and then free the ctx memory  */
75         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
76 }
77
78 static inline struct caam_jr_op_ctx *
79 caam_jr_alloc_ctx(struct caam_jr_session *ses)
80 {
81         struct caam_jr_op_ctx *ctx;
82         int ret;
83
84         ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
85         if (!ctx || ret) {
86                 CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
87                 return NULL;
88         }
89         /*
90          * Clear SG memory. There are 16 SG entries of 16 Bytes each.
91          * one call to dcbz_64() clear 64 bytes, hence calling it 4 times
92          * to clear all the SG entries. caam_jr_alloc_ctx() is called for
93          * each packet, memset is costlier than dcbz_64().
94          */
95         dcbz_64(&ctx->sg[SG_CACHELINE_0]);
96         dcbz_64(&ctx->sg[SG_CACHELINE_1]);
97         dcbz_64(&ctx->sg[SG_CACHELINE_2]);
98         dcbz_64(&ctx->sg[SG_CACHELINE_3]);
99
100         ctx->ctx_pool = ses->ctx_pool;
101         ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
102
103         return ctx;
104 }
105
106 static
107 void caam_jr_stats_get(struct rte_cryptodev *dev,
108                         struct rte_cryptodev_stats *stats)
109 {
110         struct caam_jr_qp **qp = (struct caam_jr_qp **)
111                                         dev->data->queue_pairs;
112         int i;
113
114         PMD_INIT_FUNC_TRACE();
115         if (stats == NULL) {
116                 CAAM_JR_ERR("Invalid stats ptr NULL");
117                 return;
118         }
119         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
120                 if (qp[i] == NULL) {
121                         CAAM_JR_WARN("Uninitialised queue pair");
122                         continue;
123                 }
124
125                 stats->enqueued_count += qp[i]->tx_pkts;
126                 stats->dequeued_count += qp[i]->rx_pkts;
127                 stats->enqueue_err_count += qp[i]->tx_errs;
128                 stats->dequeue_err_count += qp[i]->rx_errs;
129                 CAAM_JR_INFO("extra stats:\n\tRX Poll ERR = %" PRIu64
130                              "\n\tTX Ring Full = %" PRIu64,
131                              qp[i]->rx_poll_err,
132                              qp[i]->tx_ring_full);
133         }
134 }
135
136 static
137 void caam_jr_stats_reset(struct rte_cryptodev *dev)
138 {
139         int i;
140         struct caam_jr_qp **qp = (struct caam_jr_qp **)
141                                    (dev->data->queue_pairs);
142
143         PMD_INIT_FUNC_TRACE();
144         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
145                 if (qp[i] == NULL) {
146                         CAAM_JR_WARN("Uninitialised queue pair");
147                         continue;
148                 }
149                 qp[i]->rx_pkts = 0;
150                 qp[i]->rx_errs = 0;
151                 qp[i]->rx_poll_err = 0;
152                 qp[i]->tx_pkts = 0;
153                 qp[i]->tx_errs = 0;
154                 qp[i]->tx_ring_full = 0;
155         }
156 }
157
158 static inline int
159 is_cipher_only(struct caam_jr_session *ses)
160 {
161         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
162                 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
163 }
164
165 static inline int
166 is_auth_only(struct caam_jr_session *ses)
167 {
168         return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
169                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
170 }
171
172 static inline int
173 is_aead(struct caam_jr_session *ses)
174 {
175         return ((ses->cipher_alg == 0) &&
176                 (ses->auth_alg == 0) &&
177                 (ses->aead_alg != 0));
178 }
179
180 static inline int
181 is_auth_cipher(struct caam_jr_session *ses)
182 {
183         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
184                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
185                 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
186 }
187
188 static inline int
189 is_proto_ipsec(struct caam_jr_session *ses)
190 {
191         return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
192 }
193
194 static inline int
195 is_encode(struct caam_jr_session *ses)
196 {
197         return ses->dir == DIR_ENC;
198 }
199
200 static inline int
201 is_decode(struct caam_jr_session *ses)
202 {
203         return ses->dir == DIR_DEC;
204 }
205
/* Map the session's authentication algorithm onto the CAAM/RTA
 * algorithm selector and mode.  IPsec protocol-offload sessions use
 * the protocol (OP_PCL_IPSEC_*) selectors; plain crypto sessions use
 * the generic OP_ALG_ALGSEL_* values.  All non-NULL algorithms here
 * run in HMAC mode.
 *
 * NOTE(review): the default case only logs and leaves
 * alginfo_a->algtype at its caller-provided zero initialisation;
 * callers that compare algtype against CAAM_JR_ALG_UNSUPPORT may not
 * detect an unsupported algorithm — confirm against caam_jr_pvt.h.
 */
static inline void
caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		/* No authentication: no ICV is produced or checked */
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		/* NOTE(review): the IPsec selector here is the SHA1-160
		 * PCL value, not a SHA-224 one — looks intentional per
		 * upstream tables, but verify against the CAAM PCL list.
		 */
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
	}
}
253
/* Map the session's cipher algorithm onto the CAAM/RTA algorithm
 * selector and mode.  IPsec protocol-offload sessions use the
 * protocol (OP_PCL_IPSEC_*) selectors; otherwise the generic
 * OP_ALG_ALGSEL_* values are used.
 *
 * NOTE(review): as with caam_auth_alg(), the default case only logs
 * and does not set algtype to CAAM_JR_ALG_UNSUPPORT — verify the
 * caller's unsupported-algorithm check actually triggers.
 */
static inline void
caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		/* NULL cipher: leave alginfo untouched */
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
	}
}
282
/* Map the session's AEAD algorithm onto the CAAM/RTA selector and
 * mode.  Only AES-GCM is supported here.
 *
 * NOTE(review): the default case only logs and does not set algtype
 * to CAAM_JR_ALG_UNSUPPORT — verify the caller's check triggers.
 */
static inline void
caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
	}
}
295
296 /* prepare command block of the session */
297 static int
298 caam_jr_prep_cdb(struct caam_jr_session *ses)
299 {
300         struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
301         int32_t shared_desc_len = 0;
302         struct sec_cdb *cdb;
303         int err;
304 #if CAAM_BYTE_ORDER == CORE_BYTE_ORDER
305         int swap = false;
306 #else
307         int swap = true;
308 #endif
309
310         if (ses->cdb)
311                 caam_jr_dma_free(ses->cdb);
312
313         cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
314         if (!cdb) {
315                 CAAM_JR_ERR("failed to allocate memory for cdb\n");
316                 return -1;
317         }
318
319         ses->cdb = cdb;
320
321         memset(cdb, 0, sizeof(struct sec_cdb));
322
323         if (is_cipher_only(ses)) {
324                 caam_cipher_alg(ses, &alginfo_c);
325                 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
326                         CAAM_JR_ERR("not supported cipher alg");
327                         rte_free(cdb);
328                         return -ENOTSUP;
329                 }
330
331                 alginfo_c.key = (size_t)ses->cipher_key.data;
332                 alginfo_c.keylen = ses->cipher_key.length;
333                 alginfo_c.key_enc_flags = 0;
334                 alginfo_c.key_type = RTA_DATA_IMM;
335
336                 shared_desc_len = cnstr_shdsc_blkcipher(
337                                                 cdb->sh_desc, true,
338                                                 swap, SHR_NEVER, &alginfo_c,
339                                                 NULL,
340                                                 ses->iv.length,
341                                                 ses->dir);
342         } else if (is_auth_only(ses)) {
343                 caam_auth_alg(ses, &alginfo_a);
344                 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
345                         CAAM_JR_ERR("not supported auth alg");
346                         rte_free(cdb);
347                         return -ENOTSUP;
348                 }
349
350                 alginfo_a.key = (size_t)ses->auth_key.data;
351                 alginfo_a.keylen = ses->auth_key.length;
352                 alginfo_a.key_enc_flags = 0;
353                 alginfo_a.key_type = RTA_DATA_IMM;
354
355                 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
356                                                    swap, SHR_NEVER, &alginfo_a,
357                                                    !ses->dir,
358                                                    ses->digest_length);
359         } else if (is_aead(ses)) {
360                 caam_aead_alg(ses, &alginfo);
361                 if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
362                         CAAM_JR_ERR("not supported aead alg");
363                         rte_free(cdb);
364                         return -ENOTSUP;
365                 }
366                 alginfo.key = (size_t)ses->aead_key.data;
367                 alginfo.keylen = ses->aead_key.length;
368                 alginfo.key_enc_flags = 0;
369                 alginfo.key_type = RTA_DATA_IMM;
370
371                 if (ses->dir == DIR_ENC)
372                         shared_desc_len = cnstr_shdsc_gcm_encap(
373                                         cdb->sh_desc, true, swap,
374                                         SHR_NEVER, &alginfo,
375                                         ses->iv.length,
376                                         ses->digest_length);
377                 else
378                         shared_desc_len = cnstr_shdsc_gcm_decap(
379                                         cdb->sh_desc, true, swap,
380                                         SHR_NEVER, &alginfo,
381                                         ses->iv.length,
382                                         ses->digest_length);
383         } else {
384                 caam_cipher_alg(ses, &alginfo_c);
385                 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
386                         CAAM_JR_ERR("not supported cipher alg");
387                         rte_free(cdb);
388                         return -ENOTSUP;
389                 }
390
391                 alginfo_c.key = (size_t)ses->cipher_key.data;
392                 alginfo_c.keylen = ses->cipher_key.length;
393                 alginfo_c.key_enc_flags = 0;
394                 alginfo_c.key_type = RTA_DATA_IMM;
395
396                 caam_auth_alg(ses, &alginfo_a);
397                 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
398                         CAAM_JR_ERR("not supported auth alg");
399                         rte_free(cdb);
400                         return -ENOTSUP;
401                 }
402
403                 alginfo_a.key = (size_t)ses->auth_key.data;
404                 alginfo_a.keylen = ses->auth_key.length;
405                 alginfo_a.key_enc_flags = 0;
406                 alginfo_a.key_type = RTA_DATA_IMM;
407
408                 cdb->sh_desc[0] = alginfo_c.keylen;
409                 cdb->sh_desc[1] = alginfo_a.keylen;
410                 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
411                                        MIN_JOB_DESC_SIZE,
412                                        (unsigned int *)cdb->sh_desc,
413                                        &cdb->sh_desc[2], 2);
414
415                 if (err < 0) {
416                         CAAM_JR_ERR("Crypto: Incorrect key lengths");
417                         rte_free(cdb);
418                         return err;
419                 }
420                 if (cdb->sh_desc[2] & 1)
421                         alginfo_c.key_type = RTA_DATA_IMM;
422                 else {
423                         alginfo_c.key = (size_t)caam_jr_mem_vtop(
424                                                 (void *)(size_t)alginfo_c.key);
425                         alginfo_c.key_type = RTA_DATA_PTR;
426                 }
427                 if (cdb->sh_desc[2] & (1<<1))
428                         alginfo_a.key_type = RTA_DATA_IMM;
429                 else {
430                         alginfo_a.key = (size_t)caam_jr_mem_vtop(
431                                                 (void *)(size_t)alginfo_a.key);
432                         alginfo_a.key_type = RTA_DATA_PTR;
433                 }
434                 cdb->sh_desc[0] = 0;
435                 cdb->sh_desc[1] = 0;
436                 cdb->sh_desc[2] = 0;
437                 if (is_proto_ipsec(ses)) {
438                         if (ses->dir == DIR_ENC) {
439                                 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
440                                                 cdb->sh_desc,
441                                                 true, swap, SHR_SERIAL,
442                                                 &ses->encap_pdb,
443                                                 (uint8_t *)&ses->ip4_hdr,
444                                                 &alginfo_c, &alginfo_a);
445                         } else if (ses->dir == DIR_DEC) {
446                                 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
447                                                 cdb->sh_desc,
448                                                 true, swap, SHR_SERIAL,
449                                                 &ses->decap_pdb,
450                                                 &alginfo_c, &alginfo_a);
451                         }
452                 } else {
453                         /* Auth_only_len is set as 0 here and it will be
454                          * overwritten in fd for each packet.
455                          */
456                         shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
457                                         true, swap, SHR_SERIAL,
458                                         &alginfo_c, &alginfo_a,
459                                         ses->iv.length, 0,
460                                         ses->digest_length, ses->dir);
461                 }
462         }
463
464         if (shared_desc_len < 0) {
465                 CAAM_JR_ERR("error in preparing command block");
466                 return shared_desc_len;
467         }
468
469 #if CAAM_JR_DBG
470         SEC_DUMP_DESC(cdb->sh_desc);
471 #endif
472
473         cdb->sh_hdr.hi.field.idlen = shared_desc_len;
474
475         return 0;
476 }
477
478 /* @brief Poll the HW for already processed jobs in the JR
479  * and silently discard the available jobs or notify them to UA
480  * with indicated error code.
481  *
482  * @param [in,out]  job_ring        The job ring to poll.
483  * @param [in]  do_notify           Can be #TRUE or #FALSE. Indicates if
484  *                                  descriptors are to be discarded
485  *                                  or notified to UA with given error_code.
486  * @param [out] notified_descs    Number of notified descriptors. Can be NULL
487  *                                      if do_notify is #FALSE
488  */
489 static void
490 hw_flush_job_ring(struct sec_job_ring_t *job_ring,
491                   uint32_t do_notify,
492                   uint32_t *notified_descs)
493 {
494         int32_t jobs_no_to_discard = 0;
495         int32_t discarded_descs_no = 0;
496
497         CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
498                 job_ring, job_ring->pidx, job_ring->cidx, do_notify);
499
500         jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);
501
502         /* Discard all jobs */
503         CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
504                   job_ring, job_ring->pidx, job_ring->cidx,
505                   jobs_no_to_discard);
506
507         while (jobs_no_to_discard > discarded_descs_no) {
508                 discarded_descs_no++;
509                 /* Now increment the consumer index for the current job ring,
510                  * AFTER saving job in temporary location!
511                  * Increment the consumer index for the current job ring
512                  */
513                 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
514                                          SEC_JOB_RING_SIZE);
515
516                 hw_remove_entries(job_ring, 1);
517         }
518
519         if (do_notify == true) {
520                 ASSERT(notified_descs != NULL);
521                 *notified_descs = discarded_descs_no;
522         }
523 }
524
/* @brief Poll the HW for already processed jobs in the JR
 * and notify the available jobs to UA.
 *
 * @param [in]  job_ring        The job ring to poll.
 * @param [out] ops             Array receiving the completed crypto ops.
 * @param [in]  limit           The maximum number of jobs to notify.
 *                              If set to negative value, all available jobs are
 *                              notified.
 * @param [in,out] jr_qp        Queue pair whose error counters are updated.
 *
 * @retval >=0 for No of jobs notified to UA.
 * @retval -1 for error
 */
static int
hw_poll_job_ring(struct sec_job_ring_t *job_ring,
		 struct rte_crypto_op **ops, int32_t limit,
		 struct caam_jr_qp *jr_qp)
{
	int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify*/
	int32_t number_of_jobs_available = 0;
	int32_t notified_descs_no = 0;
	uint32_t sec_error_code = 0;
	struct job_descriptor *current_desc;
	phys_addr_t current_desc_addr;
	phys_addr_t *temp_addr;
	struct caam_jr_op_ctx *ctx;

	/* TODO check for ops have memory*/
	/* check here if any JR error that cannot be written
	 * in the output status word has occurred
	 */
	if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
		CAAM_JR_INFO("err received");
		sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
					GET_JR_REG(JRINT, job_ring));
		if (unlikely(sec_error_code)) {
			hw_job_ring_error_print(job_ring, sec_error_code);
			return -1;
		}
	}
	/* compute the number of jobs available in the job ring based on the
	 * producer and consumer index values.
	 */
	number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
	/* Compute the number of notifications that need to be raised to UA
	 * If limit > total number of done jobs -> notify all done jobs
	 * If limit = 0 -> error
	 * If limit < total number of done jobs -> notify a number
	 * of done jobs equal with limit
	 */
	jobs_no_to_notify = (limit > number_of_jobs_available) ?
				number_of_jobs_available : limit;
	CAAM_JR_DP_DEBUG(
		"Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
		job_ring, job_ring->pidx, job_ring->cidx,
		limit, number_of_jobs_available, jobs_no_to_notify);

	/* Ensure the ring entries written by HW are observed before we
	 * read status/descriptor words below.
	 */
	rte_smp_rmb();

	while (jobs_no_to_notify > notified_descs_no) {
		/* NOTE(review): these static counters are shared across
		 * all rings/threads without synchronization — acceptable
		 * only as debug instrumentation; verify if this path can
		 * run from multiple lcores.
		 */
		static uint64_t false_alarm;
		static uint64_t real_poll;

		/* Get job status here */
		sec_error_code = job_ring->output_ring[job_ring->cidx].status;
		/* Get completed descriptor */
		temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
		current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);

		real_poll++;
		/* todo check if it is false alarm no desc present */
		if (!current_desc_addr) {
			false_alarm++;
			printf("false alarm %" PRIu64 "real %" PRIu64
				" sec_err =0x%x cidx Index =0%d\n",
				false_alarm, real_poll,
				sec_error_code, job_ring->cidx);
			/* NOTE(review): rte_panic() aborts the process,
			 * so the return below is unreachable; consider a
			 * graceful error path instead of panicking.
			 */
			rte_panic("CAAM JR descriptor NULL");
			return notified_descs_no;
		}
		current_desc = (struct job_descriptor *)
				caam_jr_dma_ptov(current_desc_addr);
		/* now increment the consumer index for the current job ring,
		 * AFTER saving job in temporary location!
		 */
		job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
				 SEC_JOB_RING_SIZE);
		/* Signal that the job has been processed and the slot is free*/
		hw_remove_entries(job_ring, 1);
		/*TODO for multiple ops, packets*/
		/* Recover the op context from the descriptor address:
		 * the job descriptor is embedded in caam_jr_op_ctx.
		 */
		ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
		if (unlikely(sec_error_code)) {
			CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n",
				job_ring->cidx, sec_error_code);
			hw_handle_job_ring_error(job_ring, sec_error_code);
			//todo improve with exact errors
			ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			jr_qp->rx_errs++;
		} else {
			ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
#if CAAM_JR_DBG
			if (ctx->op->sym->m_dst) {
				rte_hexdump(stdout, "PROCESSED",
				rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
				rte_pktmbuf_data_len(ctx->op->sym->m_dst));
			} else {
				rte_hexdump(stdout, "PROCESSED",
				rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
				rte_pktmbuf_data_len(ctx->op->sym->m_src));
			}
#endif
		}
		if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct ip *ip4_hdr;

			/* IPsec offload: resize the mbuf to the length
			 * reported by the (assumed IPv4) header the HW
			 * produced.
			 * NOTE(review): assumes the packet starts with an
			 * IPv4 header — IPv6 would be mis-sized; confirm
			 * supported protocol set.
			 */
			if (ctx->op->sym->m_dst) {
				/*TODO check for ip header or other*/
				ip4_hdr = (struct ip *)
				rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*);
				ctx->op->sym->m_dst->pkt_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
				ctx->op->sym->m_dst->data_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
			} else {
				ip4_hdr = (struct ip *)
				rte_pktmbuf_mtod(ctx->op->sym->m_src, char*);
				ctx->op->sym->m_src->pkt_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
				ctx->op->sym->m_src->data_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
			}
		}
		/* Hand the op to the caller and recycle the context */
		*ops = ctx->op;
		caam_jr_op_ending(ctx);
		ops++;
		notified_descs_no++;
	}
	return notified_descs_no;
}
662
663 static uint16_t
664 caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
665                        uint16_t nb_ops)
666 {
667         struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
668         struct sec_job_ring_t *ring = jr_qp->ring;
669         int num_rx;
670         int ret;
671
672         CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);
673
674         /* Poll job ring
675          * If nb_ops < 0 -> poll JR until no more notifications are available.
676          * If nb_ops > 0 -> poll JR until limit is reached.
677          */
678
679         /* Run hw poll job ring */
680         num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
681         if (num_rx < 0) {
682                 CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
683                 return 0;
684         }
685
686         CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);
687
688         if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
689                 if (num_rx < nb_ops) {
690                         ret = caam_jr_enable_irqs(ring->irq_fd);
691                         SEC_ASSERT(ret == 0, ret,
692                         "Failed to enable irqs for job ring %p", ring);
693                 }
694         } else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
695
696                 /* Always enable IRQ generation when in pure IRQ mode */
697                 ret = caam_jr_enable_irqs(ring->irq_fd);
698                 SEC_ASSERT(ret == 0, ret,
699                         "Failed to enable irqs for job ring %p", ring);
700         }
701
702         jr_qp->rx_pkts += num_rx;
703
704         return num_rx;
705 }
706
/**
 * Build the job descriptor for an auth-only operation on a
 * scatter-gather (multi-segment) mbuf.
 *
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 *
 * Returns the populated op context, or NULL when the segment count
 * exceeds the SG table or no context could be allocated.
 */
static inline struct caam_jr_op_ctx *
build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	int     length;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	struct sec_job_descriptor_t *jobdescr;
	uint8_t extra_segs;

	/* Decode (verify) needs one extra SG entry for the saved ICV */
	if (is_decode(ses))
		extra_segs = 2;
	else
		extra_segs = 1;

	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
		CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;

	/* Locate the shared descriptor inside the session CDB */
	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* output: the computed digest goes straight to the op's
	 * digest buffer
	 */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
			0, ses->digest_length);

	/*input: first SG entry covers the auth region of the first
	 * segment; subsequent entries cover whole segments.
	 */
	sg = &ctx->sg[0];
	length = sym->auth.data.length;
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) + sym->auth.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		/* digest verification case */
		sg++;
		/* hash result or digest, save digest first */
		rte_memcpy(ctx->digest, sym->auth.digest.data,
			   ses->digest_length);
#if CAAM_JR_DBG
		rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
#endif
		sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
		length += ses->digest_length;
	} else {
		/* NOTE(review): assumes the digest occupies the tail of
		 * the LAST segment and that segment is at least
		 * digest_length bytes — confirm against callers.
		 */
		sg->len -= ses->digest_length;
	}

	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_IN_PTR(jobdescr,
		(uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, length);
	/* enabling sg list */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	return ctx;
}
800
/*
 * Build a SEC job descriptor for an authentication-only operation on a
 * contiguous mbuf.
 *
 * Output is the application-supplied digest buffer.  For encode, the input
 * points straight at the auth region of the mbuf; for decode, a two-entry
 * SG table is used so the expected ICV (copied into ctx->digest) follows
 * the data and is verified by the hardware.
 */
static inline struct caam_jr_op_ctx *
build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t start_addr;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	struct sec_job_descriptor_t *jobdescr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;

	/* Offset of the shared descriptor within the session CDB. */
	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	start_addr = rte_pktmbuf_iova(sym->m_src);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* output */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
			0, ses->digest_length);

	/*input */
	if (is_decode(ses)) {
		sg = &ctx->sg[0];
		SEC_JD_SET_IN_PTR(jobdescr,
			(uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
			(sym->auth.data.length + ses->digest_length));
		/* enabling sg list */
		(jobdescr)->seq_in.command.word  |= 0x01000000;

		/* hash result or digest, save digest first */
		rte_memcpy(ctx->digest, sym->auth.digest.data,
			   ses->digest_length);
		sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);

#if CAAM_JR_DBG
		rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
#endif
		/* let's check digest by hw */
		sg++;
		sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
		/* last element*/
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	} else {
		/* encode: hash straight over the auth region of the mbuf */
		SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
			sym->auth.data.offset, sym->auth.data.length);
	}
	return ctx;
}
864
/*
 * Build a SEC job descriptor for a cipher-only operation on multi-segment
 * (scatter-gather) mbufs.
 *
 * The output SG table covers the cipher region of the destination chain
 * (m_dst when set, otherwise in-place on m_src).  The input SG table
 * starts with the IV taken from the op private area, followed by the
 * cipher region of the source chain.
 *
 * Returns the prepared op context, or NULL on too many segments or context
 * allocation failure.
 */
static inline struct caam_jr_op_ctx *
build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg, *in_sg;
	int length;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;
	uint8_t reg_segs;

	/* Worst-case SG entry count for both in and out tables, plus IV. */
	if (sym->m_dst) {
		mbuf = sym->m_dst;
		reg_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	} else {
		mbuf = sym->m_src;
		reg_segs = mbuf->nb_segs * 2 + 2;
	}

	if (reg_segs > MAX_SG_ENTRIES) {
		CAAM_JR_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	/* Offset of the shared descriptor within the session CDB. */
	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

#if CAAM_JR_DBG
	CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
			sym->m_src->data_off, sym->cipher.data.offset,
			sym->cipher.data.length, ses->iv.length);
#endif
	/* output */
	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	sg = &ctx->sg[0];
	length = sym->cipher.data.length;

	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
		+ sym->cipher.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_OUT_PTR(jobdescr,
			(uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0,
			length);
	/*enabling sg bit */
	(jobdescr)->seq_out.command.word  |= 0x01000000;

	/*input: IV entry followed by the source cipher region */
	sg++;
	mbuf = sym->m_src;
	in_sg = sg;

	length = sym->cipher.data.length + ses->iv.length;

	/* IV */
	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
	sg->len = cpu_to_caam32(ses->iv.length);

	/* 1st seg */
	sg++;
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
				+ sym->cipher.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);


	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, in_sg), 0,
				length);
	/*enabling sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	return ctx;
}
980
/*
 * Build a SEC job descriptor for a cipher-only operation on contiguous
 * mbufs.
 *
 * Output goes directly to the destination mbuf (m_dst, or in-place on
 * m_src).  Input is a two-entry SG table: the IV from the op private area
 * followed by the cipher region of the source mbuf.
 */
static inline struct caam_jr_op_ctx *
build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	/* Offset of the shared descriptor within the session CDB. */
	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

#if CAAM_JR_DBG
	CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
			sym->m_src->data_off, sym->cipher.data.offset,
			sym->cipher.data.length, ses->iv.length);
#endif
	/* output */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
			sym->cipher.data.offset,
			sym->cipher.data.length + ses->iv.length);

	/*input: SG table holding the IV and the cipher payload */
	sg = &ctx->sg[0];
	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
				sym->cipher.data.length + ses->iv.length);
	/*enabling sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
	sg->len = cpu_to_caam32(ses->iv.length);

	sg = &ctx->sg[1];
	sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
	sg->len = cpu_to_caam32(sym->cipher.data.length);
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	return ctx;
}
1043
/* For decapsulation:
 *     Input:
 * +----+----------------+--------------------------------+-----+
 * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
 * +----+----------------+--------------------------------+-----+
 *     Output:
 * +--------------------------------+
 * | Decrypted & authenticated data |
 * +--------------------------------+
 */
1054
/*
 * Build a SEC job descriptor for a chained cipher+auth operation on
 * multi-segment (scatter-gather) mbufs.
 *
 * Output: SG table over the auth region of the destination chain, plus the
 * digest buffer when encoding.  Input: IV, then the auth region of the
 * source chain, plus the expected ICV (copied into ctx->digest) when
 * decoding.  The auth-only length (auth length minus cipher length) is
 * handed to the hardware via the DPOVRD field of the job descriptor.
 *
 * Returns the prepared op context, or NULL on too many segments or context
 * allocation failure.
 */
static inline struct caam_jr_op_ctx *
build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint32_t length = 0;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;
	uint32_t auth_only_len;

	auth_only_len = op->sym->auth.data.length -
				op->sym->cipher.data.length;

	/* Worst-case SG entries: out + in chains plus IV and digest. */
	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	/* Offset of the shared descriptor within the session CDB. */
	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* output */
	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	out_sg = &ctx->sg[0];
	/* encode appends the generated digest to the output */
	if (is_encode(ses))
		length = sym->auth.data.length + ses->digest_length;
	else
		length = sym->auth.data.length;

	sg = &ctx->sg[0];

	/* 1st seg */
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
		+ sym->auth.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}

	if (is_encode(ses)) {
		/* set auth output */
		sg++;
		sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
		sg->len = cpu_to_caam32(ses->digest_length);
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_OUT_PTR(jobdescr,
			   (uint64_t)caam_jr_dma_vtop(out_sg), 0, length);
	/* set sg bit */
	(jobdescr)->seq_out.command.word  |= 0x01000000;

	/* input */
	sg++;
	mbuf = sym->m_src;
	in_sg = sg;
	/* decode additionally feeds the expected ICV to the hardware */
	if (is_encode(ses))
		length = ses->iv.length + sym->auth.data.length;
	else
		length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
	sg->len = cpu_to_caam32(ses->iv.length);

	sg++;
	/* 1st seg */
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
		+ sym->auth.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		sg++;
		rte_memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0,
				length);
	/* set sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;
	/* Auth_only_len is set as 0 in descriptor and it is
	 * overwritten here in the jd which will update
	 * the DPOVRD reg.
	 */
	if (auth_only_len)
		/* override DPOVRD with the auth-only length */
		(jobdescr)->dpovrd = 0x80000000 | auth_only_len;

	return ctx;
}
1197
/*
 * Build a SEC job descriptor for a chained cipher+auth operation on
 * contiguous mbufs.
 *
 * Both directions use SG tables: the input table holds the IV and the auth
 * region (plus, for decode, the expected ICV copied into ctx->digest); the
 * output table starts at the fixed entry ctx->sg[6] and holds the cipher
 * region (plus, for encode, the digest buffer).  The auth-only length is
 * handed to the hardware via the DPOVRD field of the job descriptor.
 */
static inline struct caam_jr_op_ctx *
build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;
	uint32_t auth_only_len;

	auth_only_len = op->sym->auth.data.length -
				op->sym->cipher.data.length;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	/* Offset of the shared descriptor within the session CDB. */
	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* input */
	sg = &ctx->sg[0];
	if (is_encode(ses)) {
		/* IV, then the auth region */
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
		sg->len = cpu_to_caam32(ses->iv.length);
		length += ses->iv.length;

		sg++;
		sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);
		length += sym->auth.data.length;
		/* last element*/
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	} else {
		/* IV, auth region, then the expected ICV for verification */
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
		sg->len = cpu_to_caam32(ses->iv.length);
		length += ses->iv.length;

		sg++;
		sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);
		length += sym->auth.data.length;

		rte_memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
		length += ses->digest_length;
		/* last element*/
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	}

	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
				length);
	/* set sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	/* output: table starts at fixed entry 6, past the input entries */
	sg = &ctx->sg[6];

	sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
	sg->len = cpu_to_caam32(sym->cipher.data.length);
	length = sym->cipher.data.length;

	if (is_encode(ses)) {
		/* set auth output */
		sg++;
		sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
		sg->len = cpu_to_caam32(ses->digest_length);
		length += ses->digest_length;
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_OUT_PTR(jobdescr,
			   (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
	/* set sg bit */
	(jobdescr)->seq_out.command.word  |= 0x01000000;

	/* Auth_only_len is set as 0 in descriptor and it is
	 * overwritten here in the jd which will update
	 * the DPOVRD reg.
	 */
	if (auth_only_len)
		/* override DPOVRD with the auth-only length */
		(jobdescr)->dpovrd = 0x80000000 | auth_only_len;

	return ctx;
}
1307
/*
 * Build a SEC job descriptor for a protocol (IPsec) offload operation:
 * the hardware performs the full encap/decap, so the whole packet is
 * handed over without any SG table.
 *
 * NOTE(review): the output length is the remaining room in the *source*
 * buffer (buf_len - data_off) even when m_dst is used -- presumably m_dst
 * is at least that large; confirm with callers.
 */
static inline struct caam_jr_op_ctx *
build_proto(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx = NULL;
	phys_addr_t src_start_addr, dst_start_addr;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	struct sec_job_descriptor_t *jobdescr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* Offset of the shared descriptor within the session CDB. */
	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
			cdb->sh_hdr.hi.field.idlen);

	/* output */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 0,
			sym->m_src->buf_len - sym->m_src->data_off);
	/* input */
	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)src_start_addr, 0,
			sym->m_src->pkt_len);
	/* the L4 type is no longer meaningful after encap/decap */
	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return ctx;
}
1349
/*
 * Prepare a job descriptor for one crypto op and place it on the job ring.
 *
 * Resolves the session (regular or security), rebuilds the session CDB if
 * the op arrives on a different queue pair than the session last used,
 * dispatches to the matching descriptor builder (contiguous vs scatter-
 * gather), byte-swaps the descriptor when core and CAAM endianness differ,
 * and finally writes the descriptor address into the input ring and rings
 * the hardware doorbell.
 *
 * Returns 0 on success, -1 on an unsupported op, -EBUSY when the ring is
 * full.
 */
static int
caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
{
	struct sec_job_ring_t *ring = qp->ring;
	struct caam_jr_session *ses;
	struct caam_jr_op_ctx *ctx = NULL;
	struct sec_job_descriptor_t *jobdescr __rte_unused;

	switch (op->sess_type) {
	case RTE_CRYPTO_OP_WITH_SESSION:
		ses = (struct caam_jr_session *)
		get_sym_session_private_data(op->sym->session,
					cryptodev_driver_id);
		break;
	case RTE_CRYPTO_OP_SECURITY_SESSION:
		ses = (struct caam_jr_session *)
			get_sec_session_private_data(
					op->sym->sec_session);
		break;
	default:
		CAAM_JR_DP_ERR("sessionless crypto op not supported");
		qp->tx_errs++;
		return -1;
	}

	/* Re-prepare the CDB if the session moved to a new queue pair. */
	if (unlikely(!ses->qp || ses->qp != qp)) {
		CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp);
		ses->qp = qp;
		caam_jr_prep_cdb(ses);
	}

	if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
		if (is_auth_cipher(ses))
			ctx = build_cipher_auth(op, ses);
		else if (is_aead(ses))
			goto err1;
		else if (is_auth_only(ses))
			ctx = build_auth_only(op, ses);
		else if (is_cipher_only(ses))
			ctx = build_cipher_only(op, ses);
		else if (is_proto_ipsec(ses))
			ctx = build_proto(op, ses);
	} else {
		/* no SG builder for IPsec proto offload: ctx stays NULL
		 * and the op is rejected below
		 */
		if (is_auth_cipher(ses))
			ctx = build_cipher_auth_sg(op, ses);
		else if (is_aead(ses))
			goto err1;
		else if (is_auth_only(ses))
			ctx = build_auth_only_sg(op, ses);
		else if (is_cipher_only(ses))
			ctx = build_cipher_only_sg(op, ses);
	}
err1:
	if (unlikely(!ctx)) {
		qp->tx_errs++;
		CAAM_JR_ERR("not supported sec op");
		return -1;
	}
#if CAAM_JR_DBG
	if (is_decode(ses))
		rte_hexdump(stdout, "DECODE",
			rte_pktmbuf_mtod(op->sym->m_src, void *),
			rte_pktmbuf_data_len(op->sym->m_src));
	else
		rte_hexdump(stdout, "ENCODE",
			rte_pktmbuf_mtod(op->sym->m_src, void *),
			rte_pktmbuf_data_len(op->sym->m_src));

	printf("\n JD before conversion\n");
	for (int i = 0; i < 12; i++)
		printf("\n 0x%08x", ctx->jobdes.desc[i]);
#endif

	CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
		      ring, ring->pidx, ring->cidx);

	/* todo - do we want to retry */
	if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
			 SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
		CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
			      ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
		caam_jr_op_ending(ctx);
		qp->tx_ring_full++;
		return -EBUSY;
	}

#if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
	/* Core and CAAM endianness differ: swap every descriptor word. */
	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	jobdescr->deschdr.command.word =
		cpu_to_caam32(jobdescr->deschdr.command.word);
	jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
	jobdescr->seq_out.command.word =
		cpu_to_caam32(jobdescr->seq_out.command.word);
	jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
	jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
	jobdescr->seq_in.command.word =
		cpu_to_caam32(jobdescr->seq_in.command.word);
	jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
	jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
	jobdescr->load_dpovrd.command.word =
		cpu_to_caam32(jobdescr->load_dpovrd.command.word);
	jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
#endif

	/* Set ptr in input ring to current descriptor  */
	sec_write_addr(&ring->input_ring[ring->pidx],
			(phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
	rte_smp_wmb();

	/* Notify HW that a new job is enqueued */
	hw_enqueue_desc_on_job_ring(ring);

	/* increment the producer index for the current job ring */
	ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);

	return 0;
}
1468
1469 static uint16_t
1470 caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1471                        uint16_t nb_ops)
1472 {
1473         /* Function to transmit the frames to given device and queuepair */
1474         uint32_t loop;
1475         int32_t ret;
1476         struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
1477         uint16_t num_tx = 0;
1478         /*Prepare each packet which is to be sent*/
1479         for (loop = 0; loop < nb_ops; loop++) {
1480                 ret = caam_jr_enqueue_op(ops[loop], jr_qp);
1481                 if (!ret)
1482                         num_tx++;
1483         }
1484
1485         jr_qp->tx_pkts += num_tx;
1486
1487         return num_tx;
1488 }
1489
1490 /* Release queue pair */
1491 static int
1492 caam_jr_queue_pair_release(struct rte_cryptodev *dev,
1493                            uint16_t qp_id)
1494 {
1495         struct sec_job_ring_t *internals;
1496         struct caam_jr_qp *qp = NULL;
1497
1498         PMD_INIT_FUNC_TRACE();
1499         CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);
1500
1501         internals = dev->data->dev_private;
1502         if (qp_id >= internals->max_nb_queue_pairs) {
1503                 CAAM_JR_ERR("Max supported qpid %d",
1504                              internals->max_nb_queue_pairs);
1505                 return -EINVAL;
1506         }
1507
1508         qp = &internals->qps[qp_id];
1509         qp->ring = NULL;
1510         dev->data->queue_pairs[qp_id] = NULL;
1511
1512         return 0;
1513 }
1514
1515 /* Setup a queue pair */
1516 static int
1517 caam_jr_queue_pair_setup(
1518                 struct rte_cryptodev *dev, uint16_t qp_id,
1519                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1520                 __rte_unused int socket_id)
1521 {
1522         struct sec_job_ring_t *internals;
1523         struct caam_jr_qp *qp = NULL;
1524
1525         PMD_INIT_FUNC_TRACE();
1526         CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1527
1528         internals = dev->data->dev_private;
1529         if (qp_id >= internals->max_nb_queue_pairs) {
1530                 CAAM_JR_ERR("Max supported qpid %d",
1531                              internals->max_nb_queue_pairs);
1532                 return -EINVAL;
1533         }
1534
1535         qp = &internals->qps[qp_id];
1536         qp->ring = internals;
1537         dev->data->queue_pairs[qp_id] = qp;
1538
1539         return 0;
1540 }
1541
1542 /* Return the number of allocated queue pairs */
1543 static uint32_t
1544 caam_jr_queue_pair_count(struct rte_cryptodev *dev)
1545 {
1546         PMD_INIT_FUNC_TRACE();
1547
1548         return dev->data->nb_queue_pairs;
1549 }
1550
/* Returns the size of the caam_jr private session structure */
static unsigned int
caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(struct caam_jr_session);
}
1559
1560 static int
1561 caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
1562                     struct rte_crypto_sym_xform *xform,
1563                     struct caam_jr_session *session)
1564 {
1565         session->cipher_alg = xform->cipher.algo;
1566         session->iv.length = xform->cipher.iv.length;
1567         session->iv.offset = xform->cipher.iv.offset;
1568         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1569                                                RTE_CACHE_LINE_SIZE);
1570         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1571                 CAAM_JR_ERR("No Memory for cipher key\n");
1572                 return -ENOMEM;
1573         }
1574         session->cipher_key.length = xform->cipher.key.length;
1575
1576         memcpy(session->cipher_key.data, xform->cipher.key.data,
1577                xform->cipher.key.length);
1578         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1579                         DIR_ENC : DIR_DEC;
1580
1581         return 0;
1582 }
1583
1584 static int
1585 caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
1586                   struct rte_crypto_sym_xform *xform,
1587                   struct caam_jr_session *session)
1588 {
1589         session->auth_alg = xform->auth.algo;
1590         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1591                                              RTE_CACHE_LINE_SIZE);
1592         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1593                 CAAM_JR_ERR("No Memory for auth key\n");
1594                 return -ENOMEM;
1595         }
1596         session->auth_key.length = xform->auth.key.length;
1597         session->digest_length = xform->auth.digest_length;
1598
1599         memcpy(session->auth_key.data, xform->auth.key.data,
1600                xform->auth.key.length);
1601         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1602                         DIR_ENC : DIR_DEC;
1603
1604         return 0;
1605 }
1606
1607 static int
1608 caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
1609                   struct rte_crypto_sym_xform *xform,
1610                   struct caam_jr_session *session)
1611 {
1612         session->aead_alg = xform->aead.algo;
1613         session->iv.length = xform->aead.iv.length;
1614         session->iv.offset = xform->aead.iv.offset;
1615         session->auth_only_len = xform->aead.aad_length;
1616         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1617                                              RTE_CACHE_LINE_SIZE);
1618         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1619                 CAAM_JR_ERR("No Memory for aead key\n");
1620                 return -ENOMEM;
1621         }
1622         session->aead_key.length = xform->aead.key.length;
1623         session->digest_length = xform->aead.digest_length;
1624
1625         memcpy(session->aead_key.data, xform->aead.key.data,
1626                xform->aead.key.length);
1627         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1628                         DIR_ENC : DIR_DEC;
1629
1630         return 0;
1631 }
1632
1633 static int
1634 caam_jr_set_session_parameters(struct rte_cryptodev *dev,
1635                                struct rte_crypto_sym_xform *xform, void *sess)
1636 {
1637         struct sec_job_ring_t *internals = dev->data->dev_private;
1638         struct caam_jr_session *session = sess;
1639
1640         PMD_INIT_FUNC_TRACE();
1641
1642         if (unlikely(sess == NULL)) {
1643                 CAAM_JR_ERR("invalid session struct");
1644                 return -EINVAL;
1645         }
1646
1647         /* Default IV length = 0 */
1648         session->iv.length = 0;
1649
1650         /* Cipher Only */
1651         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1652                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1653                 caam_jr_cipher_init(dev, xform, session);
1654
1655         /* Authentication Only */
1656         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1657                    xform->next == NULL) {
1658                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1659                 caam_jr_auth_init(dev, xform, session);
1660
1661         /* Cipher then Authenticate */
1662         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1663                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1664                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1665                         caam_jr_cipher_init(dev, xform, session);
1666                         caam_jr_auth_init(dev, xform->next, session);
1667                 } else {
1668                         CAAM_JR_ERR("Not supported: Auth then Cipher");
1669                         goto err1;
1670                 }
1671
1672         /* Authenticate then Cipher */
1673         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1674                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1675                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1676                         caam_jr_auth_init(dev, xform, session);
1677                         caam_jr_cipher_init(dev, xform->next, session);
1678                 } else {
1679                         CAAM_JR_ERR("Not supported: Auth then Cipher");
1680                         goto err1;
1681                 }
1682
1683         /* AEAD operation for AES-GCM kind of Algorithms */
1684         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1685                    xform->next == NULL) {
1686                 caam_jr_aead_init(dev, xform, session);
1687
1688         } else {
1689                 CAAM_JR_ERR("Invalid crypto type");
1690                 return -EINVAL;
1691         }
1692         session->ctx_pool = internals->ctx_pool;
1693
1694         return 0;
1695
1696 err1:
1697         rte_free(session->cipher_key.data);
1698         rte_free(session->auth_key.data);
1699         memset(session, 0, sizeof(struct caam_jr_session));
1700
1701         return -EINVAL;
1702 }
1703
1704 static int
1705 caam_jr_sym_session_configure(struct rte_cryptodev *dev,
1706                               struct rte_crypto_sym_xform *xform,
1707                               struct rte_cryptodev_sym_session *sess,
1708                               struct rte_mempool *mempool)
1709 {
1710         void *sess_private_data;
1711         int ret;
1712
1713         PMD_INIT_FUNC_TRACE();
1714
1715         if (rte_mempool_get(mempool, &sess_private_data)) {
1716                 CAAM_JR_ERR("Couldn't get object from session mempool");
1717                 return -ENOMEM;
1718         }
1719
1720         memset(sess_private_data, 0, sizeof(struct caam_jr_session));
1721         ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
1722         if (ret != 0) {
1723                 CAAM_JR_ERR("failed to configure session parameters");
1724                 /* Return session to mempool */
1725                 rte_mempool_put(mempool, sess_private_data);
1726                 return ret;
1727         }
1728
1729         set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
1730
1731         return 0;
1732 }
1733
1734 /* Clear the memory of session so it doesn't leave key material behind */
1735 static void
1736 caam_jr_sym_session_clear(struct rte_cryptodev *dev,
1737                 struct rte_cryptodev_sym_session *sess)
1738 {
1739         uint8_t index = dev->driver_id;
1740         void *sess_priv = get_sym_session_private_data(sess, index);
1741         struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1742
1743         PMD_INIT_FUNC_TRACE();
1744
1745         if (sess_priv) {
1746                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1747
1748                 rte_free(s->cipher_key.data);
1749                 rte_free(s->auth_key.data);
1750                 memset(s, 0, sizeof(struct caam_jr_session));
1751                 set_sym_session_private_data(sess, index, NULL);
1752                 rte_mempool_put(sess_mp, sess_priv);
1753         }
1754 }
1755
1756 static int
1757 caam_jr_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
1758                           struct rte_security_session_conf *conf,
1759                           void *sess)
1760 {
1761         struct sec_job_ring_t *internals = dev->data->dev_private;
1762         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1763         struct rte_crypto_auth_xform *auth_xform;
1764         struct rte_crypto_cipher_xform *cipher_xform;
1765         struct caam_jr_session *session = (struct caam_jr_session *)sess;
1766
1767         PMD_INIT_FUNC_TRACE();
1768
1769         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1770                 cipher_xform = &conf->crypto_xform->cipher;
1771                 auth_xform = &conf->crypto_xform->next->auth;
1772         } else {
1773                 auth_xform = &conf->crypto_xform->auth;
1774                 cipher_xform = &conf->crypto_xform->next->cipher;
1775         }
1776         session->proto_alg = conf->protocol;
1777         session->cipher_key.data = rte_zmalloc(NULL,
1778                                                cipher_xform->key.length,
1779                                                RTE_CACHE_LINE_SIZE);
1780         if (session->cipher_key.data == NULL &&
1781                         cipher_xform->key.length > 0) {
1782                 CAAM_JR_ERR("No Memory for cipher key\n");
1783                 return -ENOMEM;
1784         }
1785
1786         session->cipher_key.length = cipher_xform->key.length;
1787         session->auth_key.data = rte_zmalloc(NULL,
1788                                         auth_xform->key.length,
1789                                         RTE_CACHE_LINE_SIZE);
1790         if (session->auth_key.data == NULL &&
1791                         auth_xform->key.length > 0) {
1792                 CAAM_JR_ERR("No Memory for auth key\n");
1793                 rte_free(session->cipher_key.data);
1794                 return -ENOMEM;
1795         }
1796         session->auth_key.length = auth_xform->key.length;
1797         memcpy(session->cipher_key.data, cipher_xform->key.data,
1798                         cipher_xform->key.length);
1799         memcpy(session->auth_key.data, auth_xform->key.data,
1800                         auth_xform->key.length);
1801
1802         switch (auth_xform->algo) {
1803         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1804                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1805                 break;
1806         case RTE_CRYPTO_AUTH_MD5_HMAC:
1807                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1808                 break;
1809         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1810                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1811                 break;
1812         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1813                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1814                 break;
1815         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1816                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1817                 break;
1818         case RTE_CRYPTO_AUTH_AES_CMAC:
1819                 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1820                 break;
1821         case RTE_CRYPTO_AUTH_NULL:
1822                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1823                 break;
1824         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1825         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1826         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1827         case RTE_CRYPTO_AUTH_SHA1:
1828         case RTE_CRYPTO_AUTH_SHA256:
1829         case RTE_CRYPTO_AUTH_SHA512:
1830         case RTE_CRYPTO_AUTH_SHA224:
1831         case RTE_CRYPTO_AUTH_SHA384:
1832         case RTE_CRYPTO_AUTH_MD5:
1833         case RTE_CRYPTO_AUTH_AES_GMAC:
1834         case RTE_CRYPTO_AUTH_KASUMI_F9:
1835         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1836         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1837                 CAAM_JR_ERR("Crypto: Unsupported auth alg %u\n",
1838                         auth_xform->algo);
1839                 goto out;
1840         default:
1841                 CAAM_JR_ERR("Crypto: Undefined Auth specified %u\n",
1842                         auth_xform->algo);
1843                 goto out;
1844         }
1845
1846         switch (cipher_xform->algo) {
1847         case RTE_CRYPTO_CIPHER_AES_CBC:
1848                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1849                 break;
1850         case RTE_CRYPTO_CIPHER_3DES_CBC:
1851                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1852                 break;
1853         case RTE_CRYPTO_CIPHER_AES_CTR:
1854                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1855                 break;
1856         case RTE_CRYPTO_CIPHER_NULL:
1857         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1858         case RTE_CRYPTO_CIPHER_3DES_ECB:
1859         case RTE_CRYPTO_CIPHER_AES_ECB:
1860         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1861                 CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u\n",
1862                         cipher_xform->algo);
1863                 goto out;
1864         default:
1865                 CAAM_JR_ERR("Crypto: Undefined Cipher specified %u\n",
1866                         cipher_xform->algo);
1867                 goto out;
1868         }
1869
1870         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1871                 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
1872                                 sizeof(session->ip4_hdr));
1873                 session->ip4_hdr.ip_v = IPVERSION;
1874                 session->ip4_hdr.ip_hl = 5;
1875                 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
1876                                                 sizeof(session->ip4_hdr));
1877                 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
1878                 session->ip4_hdr.ip_id = 0;
1879                 session->ip4_hdr.ip_off = 0;
1880                 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
1881                 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
1882                                 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
1883                                 : IPPROTO_AH;
1884                 session->ip4_hdr.ip_sum = 0;
1885                 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
1886                 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
1887                 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
1888                                                 (void *)&session->ip4_hdr,
1889                                                 sizeof(struct ip));
1890
1891                 session->encap_pdb.options =
1892                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
1893                         PDBOPTS_ESP_OIHI_PDB_INL |
1894                         PDBOPTS_ESP_IVSRC |
1895                         PDBHMO_ESP_ENCAP_DTTL;
1896                 session->encap_pdb.spi = ipsec_xform->spi;
1897                 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
1898
1899                 session->dir = DIR_ENC;
1900         } else if (ipsec_xform->direction ==
1901                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1902                 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
1903                 session->decap_pdb.options = sizeof(struct ip) << 16;
1904                 session->dir = DIR_DEC;
1905         } else
1906                 goto out;
1907         session->ctx_pool = internals->ctx_pool;
1908
1909         return 0;
1910 out:
1911         rte_free(session->auth_key.data);
1912         rte_free(session->cipher_key.data);
1913         memset(session, 0, sizeof(struct caam_jr_session));
1914         return -1;
1915 }
1916
1917 static int
1918 caam_jr_security_session_create(void *dev,
1919                                 struct rte_security_session_conf *conf,
1920                                 struct rte_security_session *sess,
1921                                 struct rte_mempool *mempool)
1922 {
1923         void *sess_private_data;
1924         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
1925         int ret;
1926
1927         if (rte_mempool_get(mempool, &sess_private_data)) {
1928                 CAAM_JR_ERR("Couldn't get object from session mempool");
1929                 return -ENOMEM;
1930         }
1931
1932         switch (conf->protocol) {
1933         case RTE_SECURITY_PROTOCOL_IPSEC:
1934                 ret = caam_jr_set_ipsec_session(cdev, conf,
1935                                 sess_private_data);
1936                 break;
1937         case RTE_SECURITY_PROTOCOL_MACSEC:
1938                 return -ENOTSUP;
1939         default:
1940                 return -EINVAL;
1941         }
1942         if (ret != 0) {
1943                 CAAM_JR_ERR("failed to configure session parameters");
1944                 /* Return session to mempool */
1945                 rte_mempool_put(mempool, sess_private_data);
1946                 return ret;
1947         }
1948
1949         set_sec_session_private_data(sess, sess_private_data);
1950
1951         return ret;
1952 }
1953
1954 /* Clear the memory of session so it doesn't leave key material behind */
1955 static int
1956 caam_jr_security_session_destroy(void *dev __rte_unused,
1957                                  struct rte_security_session *sess)
1958 {
1959         PMD_INIT_FUNC_TRACE();
1960         void *sess_priv = get_sec_session_private_data(sess);
1961
1962         struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1963
1964         if (sess_priv) {
1965                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1966
1967                 rte_free(s->cipher_key.data);
1968                 rte_free(s->auth_key.data);
1969                 memset(sess, 0, sizeof(struct caam_jr_session));
1970                 set_sec_session_private_data(sess, NULL);
1971                 rte_mempool_put(sess_mp, sess_priv);
1972         }
1973         return 0;
1974 }
1975
1976
1977 static int
1978 caam_jr_dev_configure(struct rte_cryptodev *dev,
1979                        struct rte_cryptodev_config *config __rte_unused)
1980 {
1981         char str[20];
1982         struct sec_job_ring_t *internals;
1983
1984         PMD_INIT_FUNC_TRACE();
1985
1986         internals = dev->data->dev_private;
1987         snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
1988         if (!internals->ctx_pool) {
1989                 internals->ctx_pool = rte_mempool_create((const char *)str,
1990                                                 CTX_POOL_NUM_BUFS,
1991                                                 sizeof(struct caam_jr_op_ctx),
1992                                                 CTX_POOL_CACHE_SIZE, 0,
1993                                                 NULL, NULL, NULL, NULL,
1994                                                 SOCKET_ID_ANY, 0);
1995                 if (!internals->ctx_pool) {
1996                         CAAM_JR_ERR("%s create failed\n", str);
1997                         return -ENOMEM;
1998                 }
1999         } else
2000                 CAAM_JR_INFO("mempool already created for dev_id : %d",
2001                                 dev->data->dev_id);
2002
2003         return 0;
2004 }
2005
/* Start the device: nothing to do, the job ring is armed at init time */
static int
caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}
2012
/* Stop the device: intentionally a no-op for this PMD */
static void
caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
2018
2019 static int
2020 caam_jr_dev_close(struct rte_cryptodev *dev)
2021 {
2022         struct sec_job_ring_t *internals;
2023
2024         PMD_INIT_FUNC_TRACE();
2025
2026         if (dev == NULL)
2027                 return -ENOMEM;
2028
2029         internals = dev->data->dev_private;
2030         rte_mempool_free(internals->ctx_pool);
2031         internals->ctx_pool = NULL;
2032
2033         return 0;
2034 }
2035
2036 static void
2037 caam_jr_dev_infos_get(struct rte_cryptodev *dev,
2038                        struct rte_cryptodev_info *info)
2039 {
2040         struct sec_job_ring_t *internals = dev->data->dev_private;
2041
2042         PMD_INIT_FUNC_TRACE();
2043         if (info != NULL) {
2044                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2045                 info->feature_flags = dev->feature_flags;
2046                 info->capabilities = caam_jr_get_cryptodev_capabilities();
2047                 info->sym.max_nb_sessions = internals->max_nb_sessions;
2048                 info->driver_id = cryptodev_driver_id;
2049         }
2050 }
2051
/* Cryptodev operations table registered with the cryptodev framework */
static struct rte_cryptodev_ops caam_jr_ops = {
	.dev_configure        = caam_jr_dev_configure,
	.dev_start            = caam_jr_dev_start,
	.dev_stop             = caam_jr_dev_stop,
	.dev_close            = caam_jr_dev_close,
	.dev_infos_get        = caam_jr_dev_infos_get,
	.stats_get            = caam_jr_stats_get,
	.stats_reset          = caam_jr_stats_reset,
	.queue_pair_setup     = caam_jr_queue_pair_setup,
	.queue_pair_release   = caam_jr_queue_pair_release,
	.queue_pair_count     = caam_jr_queue_pair_count,
	.sym_session_get_size = caam_jr_sym_session_get_size,
	.sym_session_configure = caam_jr_sym_session_configure,
	.sym_session_clear    = caam_jr_sym_session_clear
};
2067
/* Security (protocol offload) operations table; unsupported callbacks
 * are left NULL.
 */
static struct rte_security_ops caam_jr_security_ops = {
	.session_create = caam_jr_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = caam_jr_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = caam_jr_get_security_capabilities
};
2076
2077 /* @brief Flush job rings of any processed descs.
2078  * The processed descs are silently dropped,
2079  * WITHOUT being notified to UA.
2080  */
2081 static void
2082 close_job_ring(struct sec_job_ring_t *job_ring)
2083 {
2084         if (job_ring->irq_fd) {
2085                 /* Producer index is frozen. If consumer index is not equal
2086                  * with producer index, then we have descs to flush.
2087                  */
2088                 while (job_ring->pidx != job_ring->cidx)
2089                         hw_flush_job_ring(job_ring, false, NULL);
2090
2091                 /* free the uio job ring */
2092                 free_job_ring(job_ring->irq_fd);
2093                 job_ring->irq_fd = 0;
2094                 caam_jr_dma_free(job_ring->input_ring);
2095                 caam_jr_dma_free(job_ring->output_ring);
2096                 g_job_rings_no--;
2097         }
2098 }
2099
/** @brief Release the software and hardware resources tied to a job ring.
 * @param [in] job_ring The job ring
 *
 * @retval  0 for success
 * @retval  -1 for error
 */
static int
shutdown_job_ring(struct sec_job_ring_t *job_ring)
{
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	ASSERT(job_ring != NULL);
	/* Quiesce the hardware ring first.
	 * NOTE(review): SEC_ASSERT presumably logs and returns 'ret' on
	 * failure — confirm the macro's semantics before restructuring.
	 */
	ret = hw_shutdown_job_ring(job_ring);
	SEC_ASSERT(ret == 0, ret,
		"Failed to shutdown hardware job ring %p",
		job_ring);

	/* Undo coalescing configuration applied by init_job_ring() */
	if (job_ring->coalescing_en)
		hw_job_ring_disable_coalescing(job_ring);

	/* IRQs were only enabled for NAPI/IRQ notification modes */
	if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
		ret = caam_jr_disable_irqs(job_ring->irq_fd);
		SEC_ASSERT(ret == 0, ret,
		"Failed to disable irqs for job ring %p",
		job_ring);
	}

	return ret;
}
2130
2131 /*
2132  * @brief Release the resources used by the SEC user space driver.
2133  *
2134  * Reset and release SEC's job rings indicated by the User Application at
2135  * init_job_ring() and free any memory allocated internally.
2136  * Call once during application tear down.
2137  *
2138  * @note In case there are any descriptors in-flight (descriptors received by
2139  * SEC driver for processing and for which no response was yet provided to UA),
2140  * the descriptors are discarded without any notifications to User Application.
2141  *
2142  * @retval ::0                  is returned for a successful execution
2143  * @retval ::-1         is returned if SEC driver release is in progress
2144  */
2145 static int
2146 caam_jr_dev_uninit(struct rte_cryptodev *dev)
2147 {
2148         struct sec_job_ring_t *internals;
2149
2150         PMD_INIT_FUNC_TRACE();
2151         if (dev == NULL)
2152                 return -ENODEV;
2153
2154         internals = dev->data->dev_private;
2155         rte_free(dev->security_ctx);
2156
2157         /* If any descriptors in flight , poll and wait
2158          * until all descriptors are received and silently discarded.
2159          */
2160         if (internals) {
2161                 shutdown_job_ring(internals);
2162                 close_job_ring(internals);
2163                 rte_mempool_free(internals->ctx_pool);
2164         }
2165
2166         CAAM_JR_INFO("Closing crypto device %s", dev->data->name);
2167
2168         /* last caam jr instance) */
2169         if (g_job_rings_no == 0)
2170                 g_driver_state = SEC_DRIVER_STATE_IDLE;
2171
2172         return SEC_SUCCESS;
2173 }
2174
2175 /* @brief Initialize the software and hardware resources tied to a job ring.
2176  * @param [in] jr_mode;         Model to be used by SEC Driver to receive
2177  *                              notifications from SEC.  Can be either
2178  *                              of the three: #SEC_NOTIFICATION_TYPE_NAPI
2179  *                              #SEC_NOTIFICATION_TYPE_IRQ or
2180  *                              #SEC_NOTIFICATION_TYPE_POLL
2181  * @param [in] NAPI_mode        The NAPI work mode to configure a job ring at
2182  *                              startup. Used only when #SEC_NOTIFICATION_TYPE
2183  *                              is set to #SEC_NOTIFICATION_TYPE_NAPI.
2184  * @param [in] irq_coalescing_timer This value determines the maximum
2185  *                                      amount of time after processing a
2186  *                                      descriptor before raising an interrupt.
2187  * @param [in] irq_coalescing_count This value determines how many
2188  *                                      descriptors are completed before
2189  *                                      raising an interrupt.
2190  * @param [in] reg_base_addr,   The job ring base address register
2191  * @param [in] irq_id           The job ring interrupt identification number.
2192  * @retval  job_ring_handle for successful job ring configuration
2193  * @retval  NULL on error
2194  *
2195  */
2196 static void *
2197 init_job_ring(void *reg_base_addr, uint32_t irq_id)
2198 {
2199         struct sec_job_ring_t *job_ring = NULL;
2200         int i, ret = 0;
2201         int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
2202         int napi_mode = 0;
2203         int irq_coalescing_timer = 0;
2204         int irq_coalescing_count = 0;
2205
2206         for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
2207                 if (g_job_rings[i].irq_fd == 0) {
2208                         job_ring = &g_job_rings[i];
2209                         g_job_rings_no++;
2210                         break;
2211                 }
2212         }
2213         if (job_ring == NULL) {
2214                 CAAM_JR_ERR("No free job ring\n");
2215                 return NULL;
2216         }
2217
2218         job_ring->register_base_addr = reg_base_addr;
2219         job_ring->jr_mode = jr_mode;
2220         job_ring->napi_mode = 0;
2221         job_ring->irq_fd = irq_id;
2222
2223         /* Allocate mem for input and output ring */
2224
2225         /* Allocate memory for input ring */
2226         job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2227                                 SEC_DMA_MEM_INPUT_RING_SIZE);
2228         memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
2229
2230         /* Allocate memory for output ring */
2231         job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2232                                 SEC_DMA_MEM_OUTPUT_RING_SIZE);
2233         memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
2234
2235         /* Reset job ring in SEC hw and configure job ring registers */
2236         ret = hw_reset_job_ring(job_ring);
2237         if (ret != 0) {
2238                 CAAM_JR_ERR("Failed to reset hardware job ring");
2239                 goto cleanup;
2240         }
2241
2242         if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
2243         /* When SEC US driver works in NAPI mode, the UA can select
2244          * if the driver starts with IRQs on or off.
2245          */
2246                 if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
2247                         CAAM_JR_INFO("Enabling DONE IRQ generationon job ring - %p",
2248                                 job_ring);
2249                         ret = caam_jr_enable_irqs(job_ring->irq_fd);
2250                         if (ret != 0) {
2251                                 CAAM_JR_ERR("Failed to enable irqs for job ring");
2252                                 goto cleanup;
2253                         }
2254                 }
2255         } else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
2256         /* When SEC US driver works in pure interrupt mode,
2257          * IRQ's are always enabled.
2258          */
2259                 CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2260                          job_ring);
2261                 ret = caam_jr_enable_irqs(job_ring->irq_fd);
2262                 if (ret != 0) {
2263                         CAAM_JR_ERR("Failed to enable irqs for job ring");
2264                         goto cleanup;
2265                 }
2266         }
2267         if (irq_coalescing_timer || irq_coalescing_count) {
2268                 hw_job_ring_set_coalescing_param(job_ring,
2269                          irq_coalescing_timer,
2270                          irq_coalescing_count);
2271
2272                 hw_job_ring_enable_coalescing(job_ring);
2273                 job_ring->coalescing_en = 1;
2274         }
2275
2276         job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
2277         job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
2278         job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
2279
2280         return job_ring;
2281 cleanup:
2282         caam_jr_dma_free(job_ring->output_ring);
2283         caam_jr_dma_free(job_ring->input_ring);
2284         return NULL;
2285 }
2286
2287
2288 static int
2289 caam_jr_dev_init(const char *name,
2290                  struct rte_vdev_device *vdev,
2291                  struct rte_cryptodev_pmd_init_params *init_params)
2292 {
2293         struct rte_cryptodev *dev;
2294         struct rte_security_ctx *security_instance;
2295         struct uio_job_ring *job_ring;
2296         char str[RTE_CRYPTODEV_NAME_MAX_LEN];
2297
2298         PMD_INIT_FUNC_TRACE();
2299
2300         /* Validate driver state */
2301         if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
2302                 g_job_rings_max = sec_configure();
2303                 if (!g_job_rings_max) {
2304                         CAAM_JR_ERR("No job ring detected on UIO !!!!");
2305                         return -1;
2306                 }
2307                 /* Update driver state */
2308                 g_driver_state = SEC_DRIVER_STATE_STARTED;
2309         }
2310
2311         if (g_job_rings_no >= g_job_rings_max) {
2312                 CAAM_JR_ERR("No more job rings available max=%d!!!!",
2313                                 g_job_rings_max);
2314                 return -1;
2315         }
2316
2317         job_ring = config_job_ring();
2318         if (job_ring == NULL) {
2319                 CAAM_JR_ERR("failed to create job ring");
2320                 goto init_error;
2321         }
2322
2323         snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
2324
2325         dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
2326         if (dev == NULL) {
2327                 CAAM_JR_ERR("failed to create cryptodev vdev");
2328                 goto cleanup;
2329         }
2330         /*TODO free it during teardown*/
2331         dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
2332                                                 job_ring->uio_fd);
2333
2334         if (!dev->data->dev_private) {
2335                 CAAM_JR_ERR("Ring memory allocation failed\n");
2336                 goto cleanup2;
2337         }
2338
2339         dev->driver_id = cryptodev_driver_id;
2340         dev->dev_ops = &caam_jr_ops;
2341
2342         /* register rx/tx burst functions for data path */
2343         dev->dequeue_burst = caam_jr_dequeue_burst;
2344         dev->enqueue_burst = caam_jr_enqueue_burst;
2345         dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2346                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
2347                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2348                         RTE_CRYPTODEV_FF_SECURITY |
2349                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2350                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2351                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2352                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2353                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2354
2355         /* For secondary processes, we don't initialise any further as primary
2356          * has already done this work. Only check we don't need a different
2357          * RX function
2358          */
2359         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2360                 CAAM_JR_WARN("Device already init by primary process");
2361                 return 0;
2362         }
2363
2364         /*TODO free it during teardown*/
2365         security_instance = rte_malloc("caam_jr",
2366                                 sizeof(struct rte_security_ctx), 0);
2367         if (security_instance == NULL) {
2368                 CAAM_JR_ERR("memory allocation failed\n");
2369                 //todo error handling.
2370                 goto cleanup2;
2371         }
2372
2373         security_instance->device = (void *)dev;
2374         security_instance->ops = &caam_jr_security_ops;
2375         security_instance->sess_cnt = 0;
2376         dev->security_ctx = security_instance;
2377
2378         RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name);
2379
2380         return 0;
2381
2382 cleanup2:
2383         caam_jr_dev_uninit(dev);
2384         rte_cryptodev_pmd_release_device(dev);
2385 cleanup:
2386         free_job_ring(job_ring->uio_fd);
2387 init_error:
2388         CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
2389                         init_params->name);
2390
2391         return -ENXIO;
2392 }
2393
2394 /** Initialise CAAM JR crypto device */
2395 static int
2396 cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
2397 {
2398         struct rte_cryptodev_pmd_init_params init_params = {
2399                 "",
2400                 sizeof(struct sec_job_ring_t),
2401                 rte_socket_id(),
2402                 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
2403         };
2404         const char *name;
2405         const char *input_args;
2406
2407         name = rte_vdev_device_name(vdev);
2408         if (name == NULL)
2409                 return -EINVAL;
2410
2411         input_args = rte_vdev_device_args(vdev);
2412         rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
2413
2414         /* if sec device version is not configured */
2415         if (!rta_get_sec_era()) {
2416                 const struct device_node *caam_node;
2417
2418                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2419                         const uint32_t *prop = of_get_property(caam_node,
2420                                         "fsl,sec-era",
2421                                         NULL);
2422                         if (prop) {
2423                                 rta_set_sec_era(
2424                                         INTL_SEC_ERA(cpu_to_caam32(*prop)));
2425                                 break;
2426                         }
2427                 }
2428         }
2429 #ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
2430         if (rta_get_sec_era() > RTA_SEC_ERA_8) {
2431                 RTE_LOG(ERR, PMD,
2432                 "CAAM is compiled in BE mode for device with sec era > 8???\n");
2433                 return -EINVAL;
2434         }
2435 #endif
2436
2437         return caam_jr_dev_init(name, vdev, &init_params);
2438 }
2439
2440 /** Uninitialise CAAM JR crypto device */
2441 static int
2442 cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
2443 {
2444         struct rte_cryptodev *cryptodev;
2445         const char *name;
2446
2447         name = rte_vdev_device_name(vdev);
2448         if (name == NULL)
2449                 return -EINVAL;
2450
2451         cryptodev = rte_cryptodev_pmd_get_named_dev(name);
2452         if (cryptodev == NULL)
2453                 return -ENODEV;
2454
2455         caam_jr_dev_uninit(cryptodev);
2456
2457         return rte_cryptodev_pmd_destroy(cryptodev);
2458 }
2459
/* vdev bus hooks: one cryptodev instance is created/destroyed per
 * "crypto_caam_jr" virtual device.
 */
static struct rte_vdev_driver cryptodev_caam_jr_drv = {
	.probe = cryptodev_caam_jr_probe,
	.remove = cryptodev_caam_jr_remove
};

/* Handle passed to RTE_PMD_REGISTER_CRYPTO_DRIVER below */
static struct cryptodev_driver caam_jr_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
/* Supported vdev arguments (consumed by rte_cryptodev_pmd_parse_input_args) */
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
	"max_nb_queue_pairs=<int>"
	"socket_id=<int>");
/* Registers the driver and populates cryptodev_driver_id, which
 * caam_jr_dev_init() stamps on each created device.
 */
RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
		cryptodev_driver_id);
2473
/* Constructor: register the "pmd.crypto.caam" log type and default its
 * level to NOTICE. rte_log_register() returns a negative value on
 * failure, in which case the level is left untouched.
 */
RTE_INIT(caam_jr_init_log)
{
	caam_jr_logtype = rte_log_register("pmd.crypto.caam");
	if (caam_jr_logtype >= 0)
		rte_log_set_level(caam_jr_logtype, RTE_LOG_NOTICE);
}