net/qede: re-add to meson
[dpdk.git] / drivers / crypto / caam_jr / caam_jr.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2017-2019 NXP
3  */
4
5 #include <fcntl.h>
6 #include <unistd.h>
7 #include <sched.h>
8 #include <net/if.h>
9
10 #include <rte_byteorder.h>
11 #include <rte_common.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_crypto.h>
14 #include <rte_cryptodev.h>
15 #include <rte_bus_vdev.h>
16 #include <rte_malloc.h>
17 #include <rte_security_driver.h>
18 #include <rte_hexdump.h>
19
20 #include <caam_jr_capabilities.h>
21 #include <caam_jr_config.h>
22 #include <caam_jr_hw_specific.h>
23 #include <caam_jr_pvt.h>
24 #include <caam_jr_desc.h>
25 #include <caam_jr_log.h>
26
27 /* RTA header files */
28 #include <hw/desc/common.h>
29 #include <hw/desc/algo.h>
30 #include <of.h>
31 #ifdef RTE_LIBRTE_PMD_CAAM_JR_DEBUG
32 #define CAAM_JR_DBG     1
33 #else
34 #define CAAM_JR_DBG     0
35 #endif
36 #define CRYPTODEV_NAME_CAAM_JR_PMD      crypto_caam_jr
37 static uint8_t cryptodev_driver_id;
38 int caam_jr_logtype;
39
40 enum rta_sec_era rta_sec_era;
41
/* Lists the states possible for the SEC user space driver. */
enum sec_driver_state_e {
	SEC_DRIVER_STATE_IDLE,		/* Driver not initialized */
	SEC_DRIVER_STATE_STARTED,	/* Driver initialized and can be used */
	SEC_DRIVER_STATE_RELEASE,	/* Driver release is in progress */
};
48
/* Job rings used for communication with SEC HW */
static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];

/* The current state of SEC user space driver */
static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;

/* The number of job rings used by SEC user space driver */
static int g_job_rings_no;
/* Upper bound on job rings this driver instance may use */
static int g_job_rings_max;

/* Layout of one entry in the SEC output (completion) ring.
 * Packed because it mirrors the hardware ring format exactly.
 */
struct sec_outring_entry {
	phys_addr_t desc;	/* Pointer to completed descriptor */
	uint32_t status;	/* Status for completed descriptor */
} __rte_packed;
63
64 /* virtual address conversin when mempool support is available for ctx */
65 static inline phys_addr_t
66 caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
67 {
68         return (size_t)vaddr - ctx->vtop_offset;
69 }
70
/* Release an op context back to its mempool once the crypto op completed.
 * The op status itself is reported via ctx->op before this is called.
 */
static inline void
caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
{
	/* report op status to sym->op and then free the ctx memory  */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}
77
78 static inline struct caam_jr_op_ctx *
79 caam_jr_alloc_ctx(struct caam_jr_session *ses)
80 {
81         struct caam_jr_op_ctx *ctx;
82         int ret;
83
84         ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
85         if (!ctx || ret) {
86                 CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
87                 return NULL;
88         }
89         /*
90          * Clear SG memory. There are 16 SG entries of 16 Bytes each.
91          * one call to dcbz_64() clear 64 bytes, hence calling it 4 times
92          * to clear all the SG entries. caam_jr_alloc_ctx() is called for
93          * each packet, memset is costlier than dcbz_64().
94          */
95         dcbz_64(&ctx->sg[SG_CACHELINE_0]);
96         dcbz_64(&ctx->sg[SG_CACHELINE_1]);
97         dcbz_64(&ctx->sg[SG_CACHELINE_2]);
98         dcbz_64(&ctx->sg[SG_CACHELINE_3]);
99
100         ctx->ctx_pool = ses->ctx_pool;
101         ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
102
103         return ctx;
104 }
105
106 static
107 void caam_jr_stats_get(struct rte_cryptodev *dev,
108                         struct rte_cryptodev_stats *stats)
109 {
110         struct caam_jr_qp **qp = (struct caam_jr_qp **)
111                                         dev->data->queue_pairs;
112         int i;
113
114         PMD_INIT_FUNC_TRACE();
115         if (stats == NULL) {
116                 CAAM_JR_ERR("Invalid stats ptr NULL");
117                 return;
118         }
119         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
120                 if (qp[i] == NULL) {
121                         CAAM_JR_WARN("Uninitialised queue pair");
122                         continue;
123                 }
124
125                 stats->enqueued_count += qp[i]->tx_pkts;
126                 stats->dequeued_count += qp[i]->rx_pkts;
127                 stats->enqueue_err_count += qp[i]->tx_errs;
128                 stats->dequeue_err_count += qp[i]->rx_errs;
129                 CAAM_JR_INFO("extra stats:\n\tRX Poll ERR = %" PRIu64
130                              "\n\tTX Ring Full = %" PRIu64,
131                              qp[i]->rx_poll_err,
132                              qp[i]->tx_ring_full);
133         }
134 }
135
136 static
137 void caam_jr_stats_reset(struct rte_cryptodev *dev)
138 {
139         int i;
140         struct caam_jr_qp **qp = (struct caam_jr_qp **)
141                                    (dev->data->queue_pairs);
142
143         PMD_INIT_FUNC_TRACE();
144         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
145                 if (qp[i] == NULL) {
146                         CAAM_JR_WARN("Uninitialised queue pair");
147                         continue;
148                 }
149                 qp[i]->rx_pkts = 0;
150                 qp[i]->rx_errs = 0;
151                 qp[i]->rx_poll_err = 0;
152                 qp[i]->tx_pkts = 0;
153                 qp[i]->tx_errs = 0;
154                 qp[i]->tx_ring_full = 0;
155         }
156 }
157
158 static inline int
159 is_cipher_only(struct caam_jr_session *ses)
160 {
161         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
162                 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
163 }
164
165 static inline int
166 is_auth_only(struct caam_jr_session *ses)
167 {
168         return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
169                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
170 }
171
/* True when the session is AEAD-only.
 * NOTE(review): compares cipher/auth algs against 0 ("not set") rather than
 * the *_NULL enum values used by the other predicates — presumably 0 means
 * the field was never configured; confirm against session setup code.
 */
static inline int
is_aead(struct caam_jr_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}
179
/* True for chained cipher+auth sessions that are NOT protocol-offload
 * (IPsec) sessions; IPsec chaining is handled by its own descriptor path.
 */
static inline int
is_auth_cipher(struct caam_jr_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}
187
/* True when the session is an rte_security IPsec protocol session. */
static inline int
is_proto_ipsec(struct caam_jr_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}
193
/* True when the session direction is encrypt/generate. */
static inline int
is_encode(struct caam_jr_session *ses)
{
	return ses->dir == DIR_ENC;
}
199
/* True when the session direction is decrypt/verify. */
static inline int
is_decode(struct caam_jr_session *ses)
{
	return ses->dir == DIR_DEC;
}
205
/* Map the session's auth algorithm to CAAM algorithm selector/mode.
 * For IPsec protocol sessions the protocol (OP_PCL_IPSEC_*) selectors are
 * used instead of the raw class-2 (OP_ALG_ALGSEL_*) ones.
 * Unsupported algorithms leave alginfo_a untouched (caller checks for
 * CAAM_JR_ALG_UNSUPPORT).
 */
static inline void
caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		/* NOTE(review): the IPsec selector here is HMAC_SHA1_160,
		 * not a SHA-224 one — verify this mapping is intentional.
		 */
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
	}
}
253
/* Map the session's cipher algorithm to CAAM algorithm selector/mode.
 * IPsec protocol sessions use OP_PCL_IPSEC_* selectors; plain crypto
 * sessions use class-1 OP_ALG_ALGSEL_* selectors.
 * Unsupported algorithms leave alginfo_c untouched (caller checks for
 * CAAM_JR_ALG_UNSUPPORT).
 */
static inline void
caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
	}
}
282
283 static inline void
284 caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
285 {
286         switch (ses->aead_alg) {
287         case RTE_CRYPTO_AEAD_AES_GCM:
288                 alginfo->algtype = OP_ALG_ALGSEL_AES;
289                 alginfo->algmode = OP_ALG_AAI_GCM;
290                 break;
291         default:
292                 CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
293         }
294 }
295
296 /* prepare command block of the session */
297 static int
298 caam_jr_prep_cdb(struct caam_jr_session *ses)
299 {
300         struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
301         int32_t shared_desc_len = 0;
302         struct sec_cdb *cdb;
303         int err;
304 #if CAAM_BYTE_ORDER == CORE_BYTE_ORDER
305         int swap = false;
306 #else
307         int swap = true;
308 #endif
309
310         if (ses->cdb)
311                 caam_jr_dma_free(ses->cdb);
312
313         cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
314         if (!cdb) {
315                 CAAM_JR_ERR("failed to allocate memory for cdb\n");
316                 return -1;
317         }
318
319         ses->cdb = cdb;
320
321         memset(cdb, 0, sizeof(struct sec_cdb));
322
323         if (is_cipher_only(ses)) {
324                 caam_cipher_alg(ses, &alginfo_c);
325                 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
326                         CAAM_JR_ERR("not supported cipher alg");
327                         rte_free(cdb);
328                         return -ENOTSUP;
329                 }
330
331                 alginfo_c.key = (size_t)ses->cipher_key.data;
332                 alginfo_c.keylen = ses->cipher_key.length;
333                 alginfo_c.key_enc_flags = 0;
334                 alginfo_c.key_type = RTA_DATA_IMM;
335
336                 shared_desc_len = cnstr_shdsc_blkcipher(
337                                                 cdb->sh_desc, true,
338                                                 swap, SHR_NEVER, &alginfo_c,
339                                                 NULL,
340                                                 ses->iv.length,
341                                                 ses->dir);
342         } else if (is_auth_only(ses)) {
343                 caam_auth_alg(ses, &alginfo_a);
344                 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
345                         CAAM_JR_ERR("not supported auth alg");
346                         rte_free(cdb);
347                         return -ENOTSUP;
348                 }
349
350                 alginfo_a.key = (size_t)ses->auth_key.data;
351                 alginfo_a.keylen = ses->auth_key.length;
352                 alginfo_a.key_enc_flags = 0;
353                 alginfo_a.key_type = RTA_DATA_IMM;
354
355                 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
356                                                    swap, SHR_NEVER, &alginfo_a,
357                                                    !ses->dir,
358                                                    ses->digest_length);
359         } else if (is_aead(ses)) {
360                 caam_aead_alg(ses, &alginfo);
361                 if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
362                         CAAM_JR_ERR("not supported aead alg");
363                         rte_free(cdb);
364                         return -ENOTSUP;
365                 }
366                 alginfo.key = (size_t)ses->aead_key.data;
367                 alginfo.keylen = ses->aead_key.length;
368                 alginfo.key_enc_flags = 0;
369                 alginfo.key_type = RTA_DATA_IMM;
370
371                 if (ses->dir == DIR_ENC)
372                         shared_desc_len = cnstr_shdsc_gcm_encap(
373                                         cdb->sh_desc, true, swap,
374                                         SHR_NEVER, &alginfo,
375                                         ses->iv.length,
376                                         ses->digest_length);
377                 else
378                         shared_desc_len = cnstr_shdsc_gcm_decap(
379                                         cdb->sh_desc, true, swap,
380                                         SHR_NEVER, &alginfo,
381                                         ses->iv.length,
382                                         ses->digest_length);
383         } else {
384                 caam_cipher_alg(ses, &alginfo_c);
385                 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
386                         CAAM_JR_ERR("not supported cipher alg");
387                         rte_free(cdb);
388                         return -ENOTSUP;
389                 }
390
391                 alginfo_c.key = (size_t)ses->cipher_key.data;
392                 alginfo_c.keylen = ses->cipher_key.length;
393                 alginfo_c.key_enc_flags = 0;
394                 alginfo_c.key_type = RTA_DATA_IMM;
395
396                 caam_auth_alg(ses, &alginfo_a);
397                 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
398                         CAAM_JR_ERR("not supported auth alg");
399                         rte_free(cdb);
400                         return -ENOTSUP;
401                 }
402
403                 alginfo_a.key = (size_t)ses->auth_key.data;
404                 alginfo_a.keylen = ses->auth_key.length;
405                 alginfo_a.key_enc_flags = 0;
406                 alginfo_a.key_type = RTA_DATA_IMM;
407
408                 cdb->sh_desc[0] = alginfo_c.keylen;
409                 cdb->sh_desc[1] = alginfo_a.keylen;
410                 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
411                                        MIN_JOB_DESC_SIZE,
412                                        (unsigned int *)cdb->sh_desc,
413                                        &cdb->sh_desc[2], 2);
414
415                 if (err < 0) {
416                         CAAM_JR_ERR("Crypto: Incorrect key lengths");
417                         rte_free(cdb);
418                         return err;
419                 }
420                 if (cdb->sh_desc[2] & 1)
421                         alginfo_c.key_type = RTA_DATA_IMM;
422                 else {
423                         alginfo_c.key = (size_t)caam_jr_mem_vtop(
424                                                 (void *)(size_t)alginfo_c.key);
425                         alginfo_c.key_type = RTA_DATA_PTR;
426                 }
427                 if (cdb->sh_desc[2] & (1<<1))
428                         alginfo_a.key_type = RTA_DATA_IMM;
429                 else {
430                         alginfo_a.key = (size_t)caam_jr_mem_vtop(
431                                                 (void *)(size_t)alginfo_a.key);
432                         alginfo_a.key_type = RTA_DATA_PTR;
433                 }
434                 cdb->sh_desc[0] = 0;
435                 cdb->sh_desc[1] = 0;
436                 cdb->sh_desc[2] = 0;
437                 if (is_proto_ipsec(ses)) {
438                         if (ses->dir == DIR_ENC) {
439                                 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
440                                                 cdb->sh_desc,
441                                                 true, swap, SHR_SERIAL,
442                                                 &ses->encap_pdb,
443                                                 (uint8_t *)&ses->ip4_hdr,
444                                                 &alginfo_c, &alginfo_a);
445                         } else if (ses->dir == DIR_DEC) {
446                                 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
447                                                 cdb->sh_desc,
448                                                 true, swap, SHR_SERIAL,
449                                                 &ses->decap_pdb,
450                                                 &alginfo_c, &alginfo_a);
451                         }
452                 } else {
453                         /* Auth_only_len is set as 0 here and it will be
454                          * overwritten in fd for each packet.
455                          */
456                         shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
457                                         true, swap, SHR_SERIAL,
458                                         &alginfo_c, &alginfo_a,
459                                         ses->iv.length, 0,
460                                         ses->digest_length, ses->dir);
461                 }
462         }
463
464         if (shared_desc_len < 0) {
465                 CAAM_JR_ERR("error in preparing command block");
466                 return shared_desc_len;
467         }
468
469 #if CAAM_JR_DBG
470         SEC_DUMP_DESC(cdb->sh_desc);
471 #endif
472
473         cdb->sh_hdr.hi.field.idlen = shared_desc_len;
474
475         return 0;
476 }
477
/* @brief Poll the HW for already processed jobs in the JR
 * and silently discard the available jobs or notify them to UA
 * with indicated error code.
 *
 * @param [in,out]  job_ring        The job ring to poll.
 * @param [in]  do_notify           Can be #TRUE or #FALSE. Indicates if
 *                                  descriptors are to be discarded
 *                                  or notified to UA with given error_code.
 * @param [out] notified_descs    Number of notified descriptors. Can be NULL
 *                                      if do_notify is #FALSE
 */
static void
hw_flush_job_ring(struct sec_job_ring_t *job_ring,
		  uint32_t do_notify,
		  uint32_t *notified_descs)
{
	int32_t jobs_no_to_discard = 0;
	int32_t discarded_descs_no = 0;

	CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
		job_ring, job_ring->pidx, job_ring->cidx, do_notify);

	/* Number of completed jobs currently sitting in the output ring */
	jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);

	/* Discard all jobs */
	CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
		  job_ring, job_ring->pidx, job_ring->cidx,
		  jobs_no_to_discard);

	while (jobs_no_to_discard > discarded_descs_no) {
		discarded_descs_no++;
		/* Now increment the consumer index for the current job ring,
		 * AFTER saving job in temporary location!
		 * Increment the consumer index for the current job ring
		 */
		job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
					 SEC_JOB_RING_SIZE);

		/* Tell the HW one output-ring slot has been consumed */
		hw_remove_entries(job_ring, 1);
	}

	if (do_notify == true) {
		ASSERT(notified_descs != NULL);
		/* Despite the name, jobs are discarded here; only the count
		 * is reported back to the caller.
		 */
		*notified_descs = discarded_descs_no;
	}
}
524
/* @brief Poll the HW for already processed jobs in the JR
 * and notify the available jobs to UA.
 *
 * @param [in]  job_ring        The job ring to poll.
 * @param [out] ops             Array receiving the completed crypto ops.
 * @param [in]  limit           The maximum number of jobs to notify.
 *                              If set to negative value, all available jobs are
 *                              notified.
 * @param [in,out] jr_qp        Queue pair whose error counters are updated.
 *
 * @retval >=0 for No of jobs notified to UA.
 * @retval -1 for error
 */
static int
hw_poll_job_ring(struct sec_job_ring_t *job_ring,
		 struct rte_crypto_op **ops, int32_t limit,
		 struct caam_jr_qp *jr_qp)
{
	int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify*/
	int32_t number_of_jobs_available = 0;
	int32_t notified_descs_no = 0;
	uint32_t sec_error_code = 0;
	struct job_descriptor *current_desc;
	phys_addr_t current_desc_addr;
	phys_addr_t *temp_addr;
	struct caam_jr_op_ctx *ctx;

	/* TODO check for ops have memory*/
	/* check here if any JR error that cannot be written
	 * in the output status word has occurred
	 */
	if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
		CAAM_JR_INFO("err received");
		sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
					GET_JR_REG(JRINT, job_ring));
		if (unlikely(sec_error_code)) {
			hw_job_ring_error_print(job_ring, sec_error_code);
			return -1;
		}
	}
	/* compute the number of jobs available in the job ring based on the
	 * producer and consumer index values.
	 */
	number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
	/* Compute the number of notifications that need to be raised to UA
	 * If limit > total number of done jobs -> notify all done jobs
	 * If limit = 0 -> error
	 * If limit < total number of done jobs -> notify a number
	 * of done jobs equal with limit
	 */
	jobs_no_to_notify = (limit > number_of_jobs_available) ?
				number_of_jobs_available : limit;
	CAAM_JR_DP_DEBUG(
		"Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
		job_ring, job_ring->pidx, job_ring->cidx,
		limit, number_of_jobs_available, jobs_no_to_notify);

	/* Read barrier: the status/desc words below must not be read before
	 * the HW-updated indices sampled above.
	 */
	rte_smp_rmb();

	while (jobs_no_to_notify > notified_descs_no) {
		/* NOTE(review): static counters are shared across all job
		 * rings/threads and updated without atomics — debug aid only.
		 */
		static uint64_t false_alarm;
		static uint64_t real_poll;

		/* Get job status here */
		sec_error_code = job_ring->output_ring[job_ring->cidx].status;
		/* Get completed descriptor */
		temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
		current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);

		real_poll++;
		/* todo check if it is false alarm no desc present */
		if (!current_desc_addr) {
			false_alarm++;
			/* NOTE(review): raw printf + rte_panic in the fast
			 * path; the return below is unreachable after panic.
			 */
			printf("false alarm %" PRIu64 "real %" PRIu64
				" sec_err =0x%x cidx Index =0%d\n",
				false_alarm, real_poll,
				sec_error_code, job_ring->cidx);
			rte_panic("CAAM JR descriptor NULL");
			return notified_descs_no;
		}
		current_desc = (struct job_descriptor *)
				caam_jr_dma_ptov(current_desc_addr);
		/* now increment the consumer index for the current job ring,
		 * AFTER saving job in temporary location!
		 */
		job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
				 SEC_JOB_RING_SIZE);
		/* Signal that the job has been processed and the slot is free*/
		hw_remove_entries(job_ring, 1);
		/*TODO for multiple ops, packets*/
		/* Recover the op context from the completed descriptor */
		ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
		if (unlikely(sec_error_code)) {
			CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n",
				job_ring->cidx, sec_error_code);
			hw_handle_job_ring_error(job_ring, sec_error_code);
			//todo improve with exact errors
			ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			jr_qp->rx_errs++;
		} else {
			ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
#if CAAM_JR_DBG
			if (ctx->op->sym->m_dst) {
				rte_hexdump(stdout, "PROCESSED",
				rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
				rte_pktmbuf_data_len(ctx->op->sym->m_dst));
			} else {
				rte_hexdump(stdout, "PROCESSED",
				rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
				rte_pktmbuf_data_len(ctx->op->sym->m_src));
			}
#endif
		}
		if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct ip *ip4_hdr;

			/* IPsec offload: SEC wrote the full (de/en)capsulated
			 * packet; resize the mbuf from the IP total-length
			 * field. Assumes IPv4 output — TODO confirm for v6.
			 */
			if (ctx->op->sym->m_dst) {
				/*TODO check for ip header or other*/
				ip4_hdr = (struct ip *)
				rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*);
				ctx->op->sym->m_dst->pkt_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
				ctx->op->sym->m_dst->data_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
			} else {
				ip4_hdr = (struct ip *)
				rte_pktmbuf_mtod(ctx->op->sym->m_src, char*);
				ctx->op->sym->m_src->pkt_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
				ctx->op->sym->m_src->data_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
			}
		}
		/* Hand the op to the caller, then recycle the context */
		*ops = ctx->op;
		caam_jr_op_ending(ctx);
		ops++;
		notified_descs_no++;
	}
	return notified_descs_no;
}
662
/* Dequeue up to nb_ops completed crypto ops from the queue pair's job ring.
 * Re-enables job-ring interrupts when running in NAPI or pure-IRQ mode.
 *
 * @return number of ops written to @ops (0 on poll error).
 */
static uint16_t
caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
	struct sec_job_ring_t *ring = jr_qp->ring;
	int num_rx;
	int ret;

	CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);

	/* Poll job ring up to the nb_ops limit.
	 * (nb_ops is uint16_t, so the "negative means unlimited" convention
	 * of hw_poll_job_ring() cannot be triggered from here.)
	 */

	/* Run hw poll job ring */
	num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
	if (num_rx < 0) {
		CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
		return 0;
	}

	CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);

	if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
		/* NAPI: re-arm IRQs only once the ring has been drained
		 * below the requested limit.
		 */
		if (num_rx < nb_ops) {
			ret = caam_jr_enable_irqs(ring->irq_fd);
			SEC_ASSERT(ret == 0, ret,
			"Failed to enable irqs for job ring %p", ring);
		}
	} else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {

		/* Always enable IRQ generation when in pure IRQ mode */
		ret = caam_jr_enable_irqs(ring->irq_fd);
		SEC_ASSERT(ret == 0, ret,
			"Failed to enable irqs for job ring %p", ring);
	}

	jr_qp->rx_pkts += num_rx;

	return num_rx;
}
706
/**
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 */
/* Build a job descriptor for an auth-only operation on a multi-segment
 * (scatter-gather) mbuf. For digest verification (decode) the expected ICV
 * is copied into the context and appended as an extra SG entry; for digest
 * generation (encode) the digest area is excluded from the last segment.
 *
 * @return op context with a filled job descriptor, or NULL on failure
 *         (too many segments or context pool exhausted).
 */
static inline struct caam_jr_op_ctx *
build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	int	length;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	struct sec_job_descriptor_t *jobdescr;
	uint8_t extra_segs;

	/* Decode needs one extra SG entry for the saved digest */
	if (is_decode(ses))
		extra_segs = 2;
	else
		extra_segs = 1;

	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
		CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;

	/* Physical address of the shared descriptor inside the cdb */
	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* output */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
			0, ses->digest_length);

	/*input */
	sg = &ctx->sg[0];
	length = sym->auth.data.length;
	/* First segment starts at the auth offset within the first mbuf */
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) + sym->auth.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		/* digest verification case */
		sg++;
		/* hash result or digest, save digest first */
		rte_memcpy(ctx->digest, sym->auth.digest.data,
			   ses->digest_length);
#if CAAM_JR_DBG
		rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
#endif
		sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
		/* Total input covers auth data plus the appended ICV */
		length += ses->digest_length;
	} else {
		/* NOTE(review): assumes the digest occupies the tail of the
		 * last segment; `length` is left at auth.data.length — verify
		 * against the SG total-length expectations of the HW.
		 */
		sg->len -= ses->digest_length;
	}

	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_IN_PTR(jobdescr,
		(uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, length);
	/* enabling sg list */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	return ctx;
}
800
/**
 * Build a SEC job descriptor for an authentication-only operation on a
 * contiguous (single-segment) mbuf.
 *
 * @param op	crypto op; op->sym->m_src holds the data to hash and
 *		op->sym->auth gives offset/length and the digest location
 * @param ses	session with the prepared shared descriptor (CDB),
 *		direction and digest length
 *
 * @return	per-op context with a filled job descriptor, or NULL when
 *		no context can be allocated
 */
static inline struct caam_jr_op_ctx *
build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t start_addr;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	struct sec_job_descriptor_t *jobdescr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;

	cdb = ses->cdb;
	/* byte offset of the shared descriptor inside the CDB */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	start_addr = rte_pktmbuf_iova(sym->m_src);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* output: computed digest is written directly */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
			0, ses->digest_length);

	/* input: verify needs a 2-entry SG list (data + reference ICV) */
	if (is_decode(ses)) {
		sg = &ctx->sg[0];
		SEC_JD_SET_IN_PTR(jobdescr,
			(uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
			(sym->auth.data.length + ses->digest_length));
		/* enabling sg list */
		(jobdescr)->seq_in.command.word  |= 0x01000000;

		/* hash result or digest, save digest first */
		rte_memcpy(ctx->digest, sym->auth.digest.data,
			   ses->digest_length);
		sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);

#if CAAM_JR_DBG
		rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
#endif
		/* let's check digest by hw */
		sg++;
		sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
		/* last element*/
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	} else {
		/* generate: hash the data in place, no SG list needed */
		SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
			sym->auth.data.offset, sym->auth.data.length);
	}
	return ctx;
}
864
/**
 * Build a SEC job descriptor for a cipher-only operation on a
 * multi-segment (scatter-gather) mbuf, optionally out-of-place
 * (sym->m_dst set).
 *
 * @param op	crypto op; op->sym->m_src (and optionally m_dst) carry the
 *		data, op->sym->cipher gives offset/length, the IV lives in
 *		the op at ses->iv.offset
 * @param ses	session with the prepared shared descriptor (CDB) and IV
 *		parameters
 *
 * @return	per-op context with a filled job descriptor, or NULL when
 *		the required SG entries exceed MAX_SG_ENTRIES or no
 *		context can be allocated
 */
static inline struct caam_jr_op_ctx *
build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg, *in_sg;
	int length;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;
	uint8_t reg_segs;

	/* SG budget: out list + in list, plus one IV entry and one spare */
	if (sym->m_dst) {
		mbuf = sym->m_dst;
		reg_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	} else {
		mbuf = sym->m_src;
		reg_segs = mbuf->nb_segs * 2 + 2;
	}

	if (reg_segs > MAX_SG_ENTRIES) {
		CAAM_JR_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	cdb = ses->cdb;
	/* byte offset of the shared descriptor inside the CDB */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

#if CAAM_JR_DBG
	CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
			sym->m_src->data_off, sym->cipher.data.offset,
			sym->cipher.data.length, ses->iv.length);
#endif
	/* output: SG list over the destination chain (m_dst or in-place) */
	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	sg = &ctx->sg[0];
	length = sym->cipher.data.length;

	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
		+ sym->cipher.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_OUT_PTR(jobdescr,
			(uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0,
			length);
	/*enabling sg bit */
	(jobdescr)->seq_out.command.word  |= 0x01000000;

	/* input: IV entry followed by the source chain */
	sg++;
	mbuf = sym->m_src;
	in_sg = sg;

	length = sym->cipher.data.length + ses->iv.length;

	/* IV */
	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
	sg->len = cpu_to_caam32(ses->iv.length);

	/* 1st seg */
	sg++;
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
				+ sym->cipher.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);


	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, in_sg), 0,
				length);
	/*enabling sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	return ctx;
}
980
/**
 * Build a SEC job descriptor for a cipher-only operation on a
 * contiguous (single-segment) mbuf, optionally out-of-place
 * (sym->m_dst set).
 *
 * @param op	crypto op; op->sym->m_src (and optionally m_dst) carry the
 *		data, op->sym->cipher gives offset/length, the IV lives in
 *		the op at ses->iv.offset
 * @param ses	session with the prepared shared descriptor (CDB) and IV
 *		parameters
 *
 * @return	per-op context with a filled job descriptor, or NULL when
 *		no context can be allocated
 */
static inline struct caam_jr_op_ctx *
build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	cdb = ses->cdb;
	/* byte offset of the shared descriptor inside the CDB */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	/* in-place operation when no separate destination is given */
	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

#if CAAM_JR_DBG
	CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
			sym->m_src->data_off, sym->cipher.data.offset,
			sym->cipher.data.length, ses->iv.length);
#endif
	/* output */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
			sym->cipher.data.offset,
			sym->cipher.data.length + ses->iv.length);

	/* input: 2-entry SG list — IV first, then the payload */
	sg = &ctx->sg[0];
	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
				sym->cipher.data.length + ses->iv.length);
	/*enabling sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
	sg->len = cpu_to_caam32(ses->iv.length);

	sg = &ctx->sg[1];
	sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
	sg->len = cpu_to_caam32(sym->cipher.data.length);
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	return ctx;
}
1043
1044 /* For decapsulation:
1045  *     Input:
1046  * +----+----------------+--------------------------------+-----+
1047  * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
1048  * +----+----------------+--------------------------------+-----+
1049  *     Output:
1050  * +----+--------------------------+
1051  * | Decrypted & authenticated data |
1052  * +----+--------------------------+
1053  */
1054
/**
 * Build a SEC job descriptor for a chained cipher+auth operation on a
 * multi-segment (scatter-gather) mbuf, optionally out-of-place
 * (sym->m_dst set).
 *
 * @param op	crypto op; auth covers [auth.data.offset, auth.data.length),
 *		cipher covers the encrypted sub-range, the IV lives in the
 *		op at ses->iv.offset
 * @param ses	session with the prepared shared descriptor (CDB),
 *		direction, IV and digest parameters
 *
 * @return	per-op context with a filled job descriptor, or NULL when
 *		the required SG entries exceed MAX_SG_ENTRIES or no
 *		context can be allocated
 */
static inline struct caam_jr_op_ctx *
build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint32_t length = 0;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;
	uint32_t auth_only_len;

	/* bytes that are authenticated but not encrypted (header part) */
	auth_only_len = op->sym->auth.data.length -
				op->sym->cipher.data.length;

	/* SG budget: out list + in list, plus IV, digest and one spare */
	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	cdb = ses->cdb;
	/* byte offset of the shared descriptor inside the CDB */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* output: destination chain, plus the generated digest on encode */
	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	out_sg = &ctx->sg[0];
	if (is_encode(ses))
		length = sym->auth.data.length + ses->digest_length;
	else
		length = sym->auth.data.length;

	sg = &ctx->sg[0];

	/* 1st seg */
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
		+ sym->auth.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}

	if (is_encode(ses)) {
		/* set auth output */
		sg++;
		sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
		sg->len = cpu_to_caam32(ses->digest_length);
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_OUT_PTR(jobdescr,
			   (uint64_t)caam_jr_dma_vtop(out_sg), 0, length);
	/* set sg bit */
	(jobdescr)->seq_out.command.word  |= 0x01000000;

	/* input: IV, the source chain, and on decode the reference ICV */
	sg++;
	mbuf = sym->m_src;
	in_sg = sg;
	if (is_encode(ses))
		length = ses->iv.length + sym->auth.data.length;
	else
		length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
	sg->len = cpu_to_caam32(ses->iv.length);

	sg++;
	/* 1st seg */
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
		+ sym->auth.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		sg++;
		/* copy the reference digest so the HW verifies against it */
		rte_memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0,
				length);
	/* set sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;
	/* Auth_only_len is set as 0 in descriptor and it is
	 * overwritten here in the jd which will update
	 * the DPOVRD reg.
	 */
	if (auth_only_len)
		/* set sg bit */
		(jobdescr)->dpovrd = 0x80000000 | auth_only_len;

	return ctx;
}
1197
/**
 * Build a SEC job descriptor for a chained cipher+auth operation on a
 * contiguous (single-segment) mbuf, optionally out-of-place
 * (sym->m_dst set).
 *
 * @param op	crypto op; auth covers [auth.data.offset, auth.data.length),
 *		cipher covers the encrypted sub-range, the IV lives in the
 *		op at ses->iv.offset
 * @param ses	session with the prepared shared descriptor (CDB),
 *		direction, IV and digest parameters
 *
 * @return	per-op context with a filled job descriptor, or NULL when
 *		no context can be allocated
 */
static inline struct caam_jr_op_ctx *
build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;
	uint32_t auth_only_len;

	/* bytes that are authenticated but not encrypted (header part) */
	auth_only_len = op->sym->auth.data.length -
				op->sym->cipher.data.length;

	/* in-place operation when no separate destination is given */
	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	cdb = ses->cdb;
	/* byte offset of the shared descriptor inside the CDB */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* input: IV + data (+ reference ICV on decode) */
	sg = &ctx->sg[0];
	if (is_encode(ses)) {
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
		sg->len = cpu_to_caam32(ses->iv.length);
		length += ses->iv.length;

		sg++;
		sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);
		length += sym->auth.data.length;
		/* last element*/
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	} else {
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
		sg->len = cpu_to_caam32(ses->iv.length);
		length += ses->iv.length;

		sg++;
		sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);
		length += sym->auth.data.length;

		/* copy the reference digest so the HW verifies against it */
		rte_memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
		length += ses->digest_length;
		/* last element*/
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	}

	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
				length);
	/* set sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	/* output: SG list placed at fixed slot 6, past the input entries */
	sg = &ctx->sg[6];

	sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
	sg->len = cpu_to_caam32(sym->cipher.data.length);
	length = sym->cipher.data.length;

	if (is_encode(ses)) {
		/* set auth output */
		sg++;
		sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
		sg->len = cpu_to_caam32(ses->digest_length);
		length += ses->digest_length;
	}
	/* last element*/
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_OUT_PTR(jobdescr,
			   (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
	/* set sg bit */
	(jobdescr)->seq_out.command.word  |= 0x01000000;

	/* Auth_only_len is set as 0 in descriptor and it is
	 * overwritten here in the jd which will update
	 * the DPOVRD reg.
	 */
	if (auth_only_len)
		/* set sg bit */
		(jobdescr)->dpovrd = 0x80000000 | auth_only_len;

	return ctx;
}
1307
/**
 * Build a SEC job descriptor for a protocol-offload (IPsec) operation on
 * a contiguous mbuf; the shared descriptor in the session performs the
 * full protocol processing.
 *
 * @param op	crypto op; op->sym->m_src (and optionally m_dst) carry the
 *		whole packet
 * @param ses	security session with the prepared shared descriptor (CDB)
 *
 * @return	per-op context with a filled job descriptor, or NULL when
 *		no context can be allocated
 */
static inline struct caam_jr_op_ctx *
build_proto(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx = NULL;
	phys_addr_t src_start_addr, dst_start_addr;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	struct sec_job_descriptor_t *jobdescr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	ctx->op = op;

	/* in-place operation when no separate destination is given */
	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	cdb = ses->cdb;
	/* byte offset of the shared descriptor inside the CDB */
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
			cdb->sh_hdr.hi.field.idlen);

	/* output: whole usable buffer, as protocol processing may grow
	 * or shrink the packet
	 */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 0,
			sym->m_src->buf_len - sym->m_src->data_off);
	/* input */
	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)src_start_addr, 0,
			sym->m_src->pkt_len);
	/* L4 type is no longer valid after encap/decap */
	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return ctx;
}
1349
/**
 * Build and submit one crypto op on the job ring bound to a queue pair.
 *
 * Resolves the session (regular or security session), rebinds the
 * session to this queue pair if needed, builds the matching job
 * descriptor (contiguous vs scatter-gather path), byte-swaps it for the
 * CAAM endianness when required, and places it on the HW input ring.
 *
 * @param op	crypto operation to submit
 * @param qp	queue pair whose job ring receives the descriptor
 *
 * @return 0 on success, -1 for unsupported session/op types or failed
 *	descriptor build, -EBUSY when the job ring is full
 */
static int
caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
{
	struct sec_job_ring_t *ring = qp->ring;
	struct caam_jr_session *ses;
	struct caam_jr_op_ctx *ctx = NULL;
	struct sec_job_descriptor_t *jobdescr __rte_unused;

	switch (op->sess_type) {
	case RTE_CRYPTO_OP_WITH_SESSION:
		ses = (struct caam_jr_session *)
		get_sym_session_private_data(op->sym->session,
					cryptodev_driver_id);
		break;
	case RTE_CRYPTO_OP_SECURITY_SESSION:
		ses = (struct caam_jr_session *)
			get_sec_session_private_data(
					op->sym->sec_session);
		break;
	default:
		/* sessionless ops are not implemented by this PMD */
		CAAM_JR_DP_ERR("sessionless crypto op not supported");
		qp->tx_errs++;
		return -1;
	}

	/* re-generate the CDB if the session moved to a new queue pair */
	if (unlikely(!ses->qp || ses->qp != qp)) {
		CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp);
		ses->qp = qp;
		caam_jr_prep_cdb(ses);
	}

	/* pick the descriptor builder: direct pointers for contiguous
	 * mbufs, SG variants otherwise; AEAD is not supported
	 */
	if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
		if (is_auth_cipher(ses))
			ctx = build_cipher_auth(op, ses);
		else if (is_aead(ses))
			goto err1;
		else if (is_auth_only(ses))
			ctx = build_auth_only(op, ses);
		else if (is_cipher_only(ses))
			ctx = build_cipher_only(op, ses);
		else if (is_proto_ipsec(ses))
			ctx = build_proto(op, ses);
	} else {
		if (is_auth_cipher(ses))
			ctx = build_cipher_auth_sg(op, ses);
		else if (is_aead(ses))
			goto err1;
		else if (is_auth_only(ses))
			ctx = build_auth_only_sg(op, ses);
		else if (is_cipher_only(ses))
			ctx = build_cipher_only_sg(op, ses);
	}
err1:
	if (unlikely(!ctx)) {
		qp->tx_errs++;
		CAAM_JR_ERR("not supported sec op");
		return -1;
	}
#if CAAM_JR_DBG
	if (is_decode(ses))
		rte_hexdump(stdout, "DECODE",
			rte_pktmbuf_mtod(op->sym->m_src, void *),
			rte_pktmbuf_data_len(op->sym->m_src));
	else
		rte_hexdump(stdout, "ENCODE",
			rte_pktmbuf_mtod(op->sym->m_src, void *),
			rte_pktmbuf_data_len(op->sym->m_src));

	printf("\n JD before conversion\n");
	for (int i = 0; i < 12; i++)
		printf("\n 0x%08x", ctx->jobdes.desc[i]);
#endif

	CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
		      ring, ring->pidx, ring->cidx);

	/* todo - do we want to retry */
	if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
			 SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
		CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
			      ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
		caam_jr_op_ending(ctx);
		qp->tx_ring_full++;
		return -EBUSY;
	}

	/* convert the descriptor words to CAAM byte order when the core
	 * endianness differs
	 */
#if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	jobdescr->deschdr.command.word =
		cpu_to_caam32(jobdescr->deschdr.command.word);
	jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
	jobdescr->seq_out.command.word =
		cpu_to_caam32(jobdescr->seq_out.command.word);
	jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
	jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
	jobdescr->seq_in.command.word =
		cpu_to_caam32(jobdescr->seq_in.command.word);
	jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
	jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
	jobdescr->load_dpovrd.command.word =
		cpu_to_caam32(jobdescr->load_dpovrd.command.word);
	jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
#endif

	/* Set ptr in input ring to current descriptor  */
	sec_write_addr(&ring->input_ring[ring->pidx],
			(phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
	/* descriptor must be fully visible before HW is notified */
	rte_smp_wmb();

	/* Notify HW that a new job is enqueued */
	hw_enqueue_desc_on_job_ring(ring);

	/* increment the producer index for the current job ring */
	ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);

	return 0;
}
1468
1469 static uint16_t
1470 caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1471                        uint16_t nb_ops)
1472 {
1473         /* Function to transmit the frames to given device and queuepair */
1474         uint32_t loop;
1475         int32_t ret;
1476         struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
1477         uint16_t num_tx = 0;
1478         /*Prepare each packet which is to be sent*/
1479         for (loop = 0; loop < nb_ops; loop++) {
1480                 ret = caam_jr_enqueue_op(ops[loop], jr_qp);
1481                 if (!ret)
1482                         num_tx++;
1483         }
1484
1485         jr_qp->tx_pkts += num_tx;
1486
1487         return num_tx;
1488 }
1489
1490 /* Release queue pair */
1491 static int
1492 caam_jr_queue_pair_release(struct rte_cryptodev *dev,
1493                            uint16_t qp_id)
1494 {
1495         struct sec_job_ring_t *internals;
1496         struct caam_jr_qp *qp = NULL;
1497
1498         PMD_INIT_FUNC_TRACE();
1499         CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);
1500
1501         internals = dev->data->dev_private;
1502         if (qp_id >= internals->max_nb_queue_pairs) {
1503                 CAAM_JR_ERR("Max supported qpid %d",
1504                              internals->max_nb_queue_pairs);
1505                 return -EINVAL;
1506         }
1507
1508         qp = &internals->qps[qp_id];
1509         qp->ring = NULL;
1510         dev->data->queue_pairs[qp_id] = NULL;
1511
1512         return 0;
1513 }
1514
1515 /* Setup a queue pair */
1516 static int
1517 caam_jr_queue_pair_setup(
1518                 struct rte_cryptodev *dev, uint16_t qp_id,
1519                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1520                 __rte_unused int socket_id)
1521 {
1522         struct sec_job_ring_t *internals;
1523         struct caam_jr_qp *qp = NULL;
1524
1525         PMD_INIT_FUNC_TRACE();
1526         CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1527
1528         internals = dev->data->dev_private;
1529         if (qp_id >= internals->max_nb_queue_pairs) {
1530                 CAAM_JR_ERR("Max supported qpid %d",
1531                              internals->max_nb_queue_pairs);
1532                 return -EINVAL;
1533         }
1534
1535         qp = &internals->qps[qp_id];
1536         qp->ring = internals;
1537         dev->data->queue_pairs[qp_id] = qp;
1538
1539         return 0;
1540 }
1541
1542 /* Return the number of allocated queue pairs */
1543 static uint32_t
1544 caam_jr_queue_pair_count(struct rte_cryptodev *dev)
1545 {
1546         PMD_INIT_FUNC_TRACE();
1547
1548         return dev->data->nb_queue_pairs;
1549 }
1550
/* Returns the size of the caam_jr session structure
 * (the original comment said "aesni gcm" — a copy-paste from another PMD)
 */
static unsigned int
caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(struct caam_jr_session);
}
1559
1560 static int
1561 caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
1562                     struct rte_crypto_sym_xform *xform,
1563                     struct caam_jr_session *session)
1564 {
1565         session->cipher_alg = xform->cipher.algo;
1566         session->iv.length = xform->cipher.iv.length;
1567         session->iv.offset = xform->cipher.iv.offset;
1568         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1569                                                RTE_CACHE_LINE_SIZE);
1570         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1571                 CAAM_JR_ERR("No Memory for cipher key\n");
1572                 return -ENOMEM;
1573         }
1574         session->cipher_key.length = xform->cipher.key.length;
1575
1576         memcpy(session->cipher_key.data, xform->cipher.key.data,
1577                xform->cipher.key.length);
1578         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1579                         DIR_ENC : DIR_DEC;
1580
1581         return 0;
1582 }
1583
1584 static int
1585 caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
1586                   struct rte_crypto_sym_xform *xform,
1587                   struct caam_jr_session *session)
1588 {
1589         session->auth_alg = xform->auth.algo;
1590         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1591                                              RTE_CACHE_LINE_SIZE);
1592         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1593                 CAAM_JR_ERR("No Memory for auth key\n");
1594                 return -ENOMEM;
1595         }
1596         session->auth_key.length = xform->auth.key.length;
1597         session->digest_length = xform->auth.digest_length;
1598
1599         memcpy(session->auth_key.data, xform->auth.key.data,
1600                xform->auth.key.length);
1601         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1602                         DIR_ENC : DIR_DEC;
1603
1604         return 0;
1605 }
1606
1607 static int
1608 caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
1609                   struct rte_crypto_sym_xform *xform,
1610                   struct caam_jr_session *session)
1611 {
1612         session->aead_alg = xform->aead.algo;
1613         session->iv.length = xform->aead.iv.length;
1614         session->iv.offset = xform->aead.iv.offset;
1615         session->auth_only_len = xform->aead.aad_length;
1616         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1617                                              RTE_CACHE_LINE_SIZE);
1618         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1619                 CAAM_JR_ERR("No Memory for aead key\n");
1620                 return -ENOMEM;
1621         }
1622         session->aead_key.length = xform->aead.key.length;
1623         session->digest_length = xform->aead.digest_length;
1624
1625         memcpy(session->aead_key.data, xform->aead.key.data,
1626                xform->aead.key.length);
1627         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1628                         DIR_ENC : DIR_DEC;
1629
1630         return 0;
1631 }
1632
1633 static int
1634 caam_jr_set_session_parameters(struct rte_cryptodev *dev,
1635                                struct rte_crypto_sym_xform *xform, void *sess)
1636 {
1637         struct sec_job_ring_t *internals = dev->data->dev_private;
1638         struct caam_jr_session *session = sess;
1639
1640         PMD_INIT_FUNC_TRACE();
1641
1642         if (unlikely(sess == NULL)) {
1643                 CAAM_JR_ERR("invalid session struct");
1644                 return -EINVAL;
1645         }
1646
1647         /* Default IV length = 0 */
1648         session->iv.length = 0;
1649
1650         /* Cipher Only */
1651         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1652                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1653                 caam_jr_cipher_init(dev, xform, session);
1654
1655         /* Authentication Only */
1656         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1657                    xform->next == NULL) {
1658                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1659                 caam_jr_auth_init(dev, xform, session);
1660
1661         /* Cipher then Authenticate */
1662         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1663                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1664                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1665                         caam_jr_cipher_init(dev, xform, session);
1666                         caam_jr_auth_init(dev, xform->next, session);
1667                 } else {
1668                         CAAM_JR_ERR("Not supported: Auth then Cipher");
1669                         goto err1;
1670                 }
1671
1672         /* Authenticate then Cipher */
1673         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1674                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1675                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1676                         caam_jr_auth_init(dev, xform, session);
1677                         caam_jr_cipher_init(dev, xform->next, session);
1678                 } else {
1679                         CAAM_JR_ERR("Not supported: Auth then Cipher");
1680                         goto err1;
1681                 }
1682
1683         /* AEAD operation for AES-GCM kind of Algorithms */
1684         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1685                    xform->next == NULL) {
1686                 caam_jr_aead_init(dev, xform, session);
1687
1688         } else {
1689                 CAAM_JR_ERR("Invalid crypto type");
1690                 return -EINVAL;
1691         }
1692         session->ctx_pool = internals->ctx_pool;
1693
1694         return 0;
1695
1696 err1:
1697         rte_free(session->cipher_key.data);
1698         rte_free(session->auth_key.data);
1699         memset(session, 0, sizeof(struct caam_jr_session));
1700
1701         return -EINVAL;
1702 }
1703
1704 static int
1705 caam_jr_sym_session_configure(struct rte_cryptodev *dev,
1706                               struct rte_crypto_sym_xform *xform,
1707                               struct rte_cryptodev_sym_session *sess,
1708                               struct rte_mempool *mempool)
1709 {
1710         void *sess_private_data;
1711         int ret;
1712
1713         PMD_INIT_FUNC_TRACE();
1714
1715         if (rte_mempool_get(mempool, &sess_private_data)) {
1716                 CAAM_JR_ERR("Couldn't get object from session mempool");
1717                 return -ENOMEM;
1718         }
1719
1720         memset(sess_private_data, 0, sizeof(struct caam_jr_session));
1721         ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
1722         if (ret != 0) {
1723                 CAAM_JR_ERR("failed to configure session parameters");
1724                 /* Return session to mempool */
1725                 rte_mempool_put(mempool, sess_private_data);
1726                 return ret;
1727         }
1728
1729         set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
1730
1731         return 0;
1732 }
1733
1734 /* Clear the memory of session so it doesn't leave key material behind */
1735 static void
1736 caam_jr_sym_session_clear(struct rte_cryptodev *dev,
1737                 struct rte_cryptodev_sym_session *sess)
1738 {
1739         uint8_t index = dev->driver_id;
1740         void *sess_priv = get_sym_session_private_data(sess, index);
1741         struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1742
1743         PMD_INIT_FUNC_TRACE();
1744
1745         if (sess_priv) {
1746                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1747
1748                 rte_free(s->cipher_key.data);
1749                 rte_free(s->auth_key.data);
1750                 memset(s, 0, sizeof(struct caam_jr_session));
1751                 set_sym_session_private_data(sess, index, NULL);
1752                 rte_mempool_put(sess_mp, sess_priv);
1753         }
1754 }
1755
1756 static int
1757 caam_jr_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
1758                           struct rte_security_session_conf *conf,
1759                           void *sess)
1760 {
1761         struct sec_job_ring_t *internals = dev->data->dev_private;
1762         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1763         struct rte_crypto_auth_xform *auth_xform;
1764         struct rte_crypto_cipher_xform *cipher_xform;
1765         struct caam_jr_session *session = (struct caam_jr_session *)sess;
1766
1767         PMD_INIT_FUNC_TRACE();
1768
1769         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1770                 cipher_xform = &conf->crypto_xform->cipher;
1771                 auth_xform = &conf->crypto_xform->next->auth;
1772         } else {
1773                 auth_xform = &conf->crypto_xform->auth;
1774                 cipher_xform = &conf->crypto_xform->next->cipher;
1775         }
1776         session->proto_alg = conf->protocol;
1777         session->cipher_key.data = rte_zmalloc(NULL,
1778                                                cipher_xform->key.length,
1779                                                RTE_CACHE_LINE_SIZE);
1780         if (session->cipher_key.data == NULL &&
1781                         cipher_xform->key.length > 0) {
1782                 CAAM_JR_ERR("No Memory for cipher key\n");
1783                 return -ENOMEM;
1784         }
1785
1786         session->cipher_key.length = cipher_xform->key.length;
1787         session->auth_key.data = rte_zmalloc(NULL,
1788                                         auth_xform->key.length,
1789                                         RTE_CACHE_LINE_SIZE);
1790         if (session->auth_key.data == NULL &&
1791                         auth_xform->key.length > 0) {
1792                 CAAM_JR_ERR("No Memory for auth key\n");
1793                 rte_free(session->cipher_key.data);
1794                 return -ENOMEM;
1795         }
1796         session->auth_key.length = auth_xform->key.length;
1797         memcpy(session->cipher_key.data, cipher_xform->key.data,
1798                         cipher_xform->key.length);
1799         memcpy(session->auth_key.data, auth_xform->key.data,
1800                         auth_xform->key.length);
1801
1802         switch (auth_xform->algo) {
1803         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1804                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1805                 break;
1806         case RTE_CRYPTO_AUTH_MD5_HMAC:
1807                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1808                 break;
1809         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1810                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1811                 break;
1812         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1813                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1814                 break;
1815         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1816                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1817                 break;
1818         case RTE_CRYPTO_AUTH_AES_CMAC:
1819                 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1820                 break;
1821         case RTE_CRYPTO_AUTH_NULL:
1822                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1823                 break;
1824         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1825         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1826         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1827         case RTE_CRYPTO_AUTH_SHA1:
1828         case RTE_CRYPTO_AUTH_SHA256:
1829         case RTE_CRYPTO_AUTH_SHA512:
1830         case RTE_CRYPTO_AUTH_SHA224:
1831         case RTE_CRYPTO_AUTH_SHA384:
1832         case RTE_CRYPTO_AUTH_MD5:
1833         case RTE_CRYPTO_AUTH_AES_GMAC:
1834         case RTE_CRYPTO_AUTH_KASUMI_F9:
1835         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1836         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1837                 CAAM_JR_ERR("Crypto: Unsupported auth alg %u\n",
1838                         auth_xform->algo);
1839                 goto out;
1840         default:
1841                 CAAM_JR_ERR("Crypto: Undefined Auth specified %u\n",
1842                         auth_xform->algo);
1843                 goto out;
1844         }
1845
1846         switch (cipher_xform->algo) {
1847         case RTE_CRYPTO_CIPHER_AES_CBC:
1848                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1849                 break;
1850         case RTE_CRYPTO_CIPHER_3DES_CBC:
1851                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1852                 break;
1853         case RTE_CRYPTO_CIPHER_AES_CTR:
1854                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1855                 break;
1856         case RTE_CRYPTO_CIPHER_NULL:
1857         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1858         case RTE_CRYPTO_CIPHER_3DES_ECB:
1859         case RTE_CRYPTO_CIPHER_AES_ECB:
1860         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1861                 CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u\n",
1862                         cipher_xform->algo);
1863                 goto out;
1864         default:
1865                 CAAM_JR_ERR("Crypto: Undefined Cipher specified %u\n",
1866                         cipher_xform->algo);
1867                 goto out;
1868         }
1869
1870         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1871                 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
1872                                 sizeof(session->ip4_hdr));
1873                 session->ip4_hdr.ip_v = IPVERSION;
1874                 session->ip4_hdr.ip_hl = 5;
1875                 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
1876                                                 sizeof(session->ip4_hdr));
1877                 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
1878                 session->ip4_hdr.ip_id = 0;
1879                 session->ip4_hdr.ip_off = 0;
1880                 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
1881                 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
1882                                 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
1883                                 : IPPROTO_AH;
1884                 session->ip4_hdr.ip_sum = 0;
1885                 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
1886                 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
1887                 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
1888                                                 (void *)&session->ip4_hdr,
1889                                                 sizeof(struct ip));
1890
1891                 session->encap_pdb.options =
1892                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
1893                         PDBOPTS_ESP_OIHI_PDB_INL |
1894                         PDBOPTS_ESP_IVSRC |
1895                         PDBHMO_ESP_ENCAP_DTTL;
1896                 if (ipsec_xform->options.esn)
1897                         session->encap_pdb.options |= PDBOPTS_ESP_ESN;
1898                 session->encap_pdb.spi = ipsec_xform->spi;
1899                 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
1900
1901                 session->dir = DIR_ENC;
1902         } else if (ipsec_xform->direction ==
1903                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1904                 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
1905                 session->decap_pdb.options = sizeof(struct ip) << 16;
1906                 if (ipsec_xform->options.esn)
1907                         session->decap_pdb.options |= PDBOPTS_ESP_ESN;
1908                 session->dir = DIR_DEC;
1909         } else
1910                 goto out;
1911         session->ctx_pool = internals->ctx_pool;
1912
1913         return 0;
1914 out:
1915         rte_free(session->auth_key.data);
1916         rte_free(session->cipher_key.data);
1917         memset(session, 0, sizeof(struct caam_jr_session));
1918         return -1;
1919 }
1920
1921 static int
1922 caam_jr_security_session_create(void *dev,
1923                                 struct rte_security_session_conf *conf,
1924                                 struct rte_security_session *sess,
1925                                 struct rte_mempool *mempool)
1926 {
1927         void *sess_private_data;
1928         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
1929         int ret;
1930
1931         if (rte_mempool_get(mempool, &sess_private_data)) {
1932                 CAAM_JR_ERR("Couldn't get object from session mempool");
1933                 return -ENOMEM;
1934         }
1935
1936         switch (conf->protocol) {
1937         case RTE_SECURITY_PROTOCOL_IPSEC:
1938                 ret = caam_jr_set_ipsec_session(cdev, conf,
1939                                 sess_private_data);
1940                 break;
1941         case RTE_SECURITY_PROTOCOL_MACSEC:
1942                 return -ENOTSUP;
1943         default:
1944                 return -EINVAL;
1945         }
1946         if (ret != 0) {
1947                 CAAM_JR_ERR("failed to configure session parameters");
1948                 /* Return session to mempool */
1949                 rte_mempool_put(mempool, sess_private_data);
1950                 return ret;
1951         }
1952
1953         set_sec_session_private_data(sess, sess_private_data);
1954
1955         return ret;
1956 }
1957
1958 /* Clear the memory of session so it doesn't leave key material behind */
1959 static int
1960 caam_jr_security_session_destroy(void *dev __rte_unused,
1961                                  struct rte_security_session *sess)
1962 {
1963         PMD_INIT_FUNC_TRACE();
1964         void *sess_priv = get_sec_session_private_data(sess);
1965
1966         struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1967
1968         if (sess_priv) {
1969                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1970
1971                 rte_free(s->cipher_key.data);
1972                 rte_free(s->auth_key.data);
1973                 memset(sess, 0, sizeof(struct caam_jr_session));
1974                 set_sec_session_private_data(sess, NULL);
1975                 rte_mempool_put(sess_mp, sess_priv);
1976         }
1977         return 0;
1978 }
1979
1980
1981 static int
1982 caam_jr_dev_configure(struct rte_cryptodev *dev,
1983                        struct rte_cryptodev_config *config __rte_unused)
1984 {
1985         char str[20];
1986         struct sec_job_ring_t *internals;
1987
1988         PMD_INIT_FUNC_TRACE();
1989
1990         internals = dev->data->dev_private;
1991         snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
1992         if (!internals->ctx_pool) {
1993                 internals->ctx_pool = rte_mempool_create((const char *)str,
1994                                                 CTX_POOL_NUM_BUFS,
1995                                                 sizeof(struct caam_jr_op_ctx),
1996                                                 CTX_POOL_CACHE_SIZE, 0,
1997                                                 NULL, NULL, NULL, NULL,
1998                                                 SOCKET_ID_ANY, 0);
1999                 if (!internals->ctx_pool) {
2000                         CAAM_JR_ERR("%s create failed\n", str);
2001                         return -ENOMEM;
2002                 }
2003         } else
2004                 CAAM_JR_INFO("mempool already created for dev_id : %d",
2005                                 dev->data->dev_id);
2006
2007         return 0;
2008 }
2009
/* Device start: nothing to do here -- the job ring is brought up at
 * probe time by init_job_ring(); always reports success.
 */
static int
caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}
2016
/* Device stop: intentionally a no-op; the ring keeps running until
 * close/uninit tears it down.
 */
static void
caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
2022
2023 static int
2024 caam_jr_dev_close(struct rte_cryptodev *dev)
2025 {
2026         struct sec_job_ring_t *internals;
2027
2028         PMD_INIT_FUNC_TRACE();
2029
2030         if (dev == NULL)
2031                 return -ENOMEM;
2032
2033         internals = dev->data->dev_private;
2034         rte_mempool_free(internals->ctx_pool);
2035         internals->ctx_pool = NULL;
2036
2037         return 0;
2038 }
2039
2040 static void
2041 caam_jr_dev_infos_get(struct rte_cryptodev *dev,
2042                        struct rte_cryptodev_info *info)
2043 {
2044         struct sec_job_ring_t *internals = dev->data->dev_private;
2045
2046         PMD_INIT_FUNC_TRACE();
2047         if (info != NULL) {
2048                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2049                 info->feature_flags = dev->feature_flags;
2050                 info->capabilities = caam_jr_get_cryptodev_capabilities();
2051                 info->sym.max_nb_sessions = internals->max_nb_sessions;
2052                 info->driver_id = cryptodev_driver_id;
2053         }
2054 }
2055
/* Cryptodev operations table registered with the framework at probe
 * time (dev->dev_ops in caam_jr_dev_init()).
 */
static struct rte_cryptodev_ops caam_jr_ops = {
	.dev_configure        = caam_jr_dev_configure,
	.dev_start            = caam_jr_dev_start,
	.dev_stop             = caam_jr_dev_stop,
	.dev_close            = caam_jr_dev_close,
	.dev_infos_get        = caam_jr_dev_infos_get,
	.stats_get            = caam_jr_stats_get,
	.stats_reset          = caam_jr_stats_reset,
	.queue_pair_setup     = caam_jr_queue_pair_setup,
	.queue_pair_release   = caam_jr_queue_pair_release,
	.queue_pair_count     = caam_jr_queue_pair_count,
	.sym_session_get_size = caam_jr_sym_session_get_size,
	.sym_session_configure = caam_jr_sym_session_configure,
	.sym_session_clear    = caam_jr_sym_session_clear
};
2071
/* rte_security operations table (lookaside IPsec protocol offload).
 * Unimplemented callbacks are left NULL.
 */
static struct rte_security_ops caam_jr_security_ops = {
	.session_create = caam_jr_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = caam_jr_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = caam_jr_get_security_capabilities
};
2080
2081 /* @brief Flush job rings of any processed descs.
2082  * The processed descs are silently dropped,
2083  * WITHOUT being notified to UA.
2084  */
2085 static void
2086 close_job_ring(struct sec_job_ring_t *job_ring)
2087 {
2088         if (job_ring->irq_fd) {
2089                 /* Producer index is frozen. If consumer index is not equal
2090                  * with producer index, then we have descs to flush.
2091                  */
2092                 while (job_ring->pidx != job_ring->cidx)
2093                         hw_flush_job_ring(job_ring, false, NULL);
2094
2095                 /* free the uio job ring */
2096                 free_job_ring(job_ring->irq_fd);
2097                 job_ring->irq_fd = 0;
2098                 caam_jr_dma_free(job_ring->input_ring);
2099                 caam_jr_dma_free(job_ring->output_ring);
2100                 g_job_rings_no--;
2101         }
2102 }
2103
/** @brief Release the software and hardware resources tied to a job ring.
 * @param [in] job_ring The job ring
 *
 * @retval  0 for success
 * @retval  -1 for error
 */
static int
shutdown_job_ring(struct sec_job_ring_t *job_ring)
{
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	ASSERT(job_ring != NULL);
	/* Quiesce the hardware ring first.
	 * NOTE(review): SEC_ASSERT presumably logs and bails out with the
	 * given code on failure -- confirm against caam_jr_pvt.h.
	 */
	ret = hw_shutdown_job_ring(job_ring);
	SEC_ASSERT(ret == 0, ret,
		"Failed to shutdown hardware job ring %p",
		job_ring);

	/* Undo IRQ coalescing if init_job_ring() enabled it. */
	if (job_ring->coalescing_en)
		hw_job_ring_disable_coalescing(job_ring);

	/* Pure polling mode never enabled IRQs, so only the other
	 * notification modes need them disabled here.
	 */
	if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
		ret = caam_jr_disable_irqs(job_ring->irq_fd);
		SEC_ASSERT(ret == 0, ret,
		"Failed to disable irqs for job ring %p",
		job_ring);
	}

	return ret;
}
2134
/*
 * @brief Release the resources used by the SEC user space driver.
 *
 * Reset and release SEC's job rings indicated by the User Application at
 * init_job_ring() and free any memory allocated internally.
 * Call once during application tear down.
 *
 * @note In case there are any descriptors in-flight (descriptors received by
 * SEC driver for processing and for which no response was yet provided to UA),
 * the descriptors are discarded without any notifications to User Application.
 *
 * @retval ::0                  is returned for a successful execution
 * @retval ::-1         is returned if SEC driver release is in progress
 */
static int
caam_jr_dev_uninit(struct rte_cryptodev *dev)
{
	struct sec_job_ring_t *internals;

	PMD_INIT_FUNC_TRACE();
	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	/* If any descriptors in flight , poll and wait
	 * until all descriptors are received and silently discarded.
	 */
	if (internals) {
		shutdown_job_ring(internals);
		/* close_job_ring() drains, frees the ring memory and
		 * decrements the global ring count.
		 */
		close_job_ring(internals);
		rte_mempool_free(internals->ctx_pool);
	}

	CAAM_JR_INFO("Closing crypto device %s", dev->data->name);

	/* last caam jr instance */
	if (g_job_rings_no == 0)
		g_driver_state = SEC_DRIVER_STATE_IDLE;

	return SEC_SUCCESS;
}
2178
2179 /* @brief Initialize the software and hardware resources tied to a job ring.
2180  * @param [in] jr_mode;         Model to be used by SEC Driver to receive
2181  *                              notifications from SEC.  Can be either
2182  *                              of the three: #SEC_NOTIFICATION_TYPE_NAPI
2183  *                              #SEC_NOTIFICATION_TYPE_IRQ or
2184  *                              #SEC_NOTIFICATION_TYPE_POLL
2185  * @param [in] NAPI_mode        The NAPI work mode to configure a job ring at
2186  *                              startup. Used only when #SEC_NOTIFICATION_TYPE
2187  *                              is set to #SEC_NOTIFICATION_TYPE_NAPI.
2188  * @param [in] irq_coalescing_timer This value determines the maximum
2189  *                                      amount of time after processing a
2190  *                                      descriptor before raising an interrupt.
2191  * @param [in] irq_coalescing_count This value determines how many
2192  *                                      descriptors are completed before
2193  *                                      raising an interrupt.
2194  * @param [in] reg_base_addr,   The job ring base address register
2195  * @param [in] irq_id           The job ring interrupt identification number.
2196  * @retval  job_ring_handle for successful job ring configuration
2197  * @retval  NULL on error
2198  *
2199  */
2200 static void *
2201 init_job_ring(void *reg_base_addr, uint32_t irq_id)
2202 {
2203         struct sec_job_ring_t *job_ring = NULL;
2204         int i, ret = 0;
2205         int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
2206         int napi_mode = 0;
2207         int irq_coalescing_timer = 0;
2208         int irq_coalescing_count = 0;
2209
2210         for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
2211                 if (g_job_rings[i].irq_fd == 0) {
2212                         job_ring = &g_job_rings[i];
2213                         g_job_rings_no++;
2214                         break;
2215                 }
2216         }
2217         if (job_ring == NULL) {
2218                 CAAM_JR_ERR("No free job ring\n");
2219                 return NULL;
2220         }
2221
2222         job_ring->register_base_addr = reg_base_addr;
2223         job_ring->jr_mode = jr_mode;
2224         job_ring->napi_mode = 0;
2225         job_ring->irq_fd = irq_id;
2226
2227         /* Allocate mem for input and output ring */
2228
2229         /* Allocate memory for input ring */
2230         job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2231                                 SEC_DMA_MEM_INPUT_RING_SIZE);
2232         memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
2233
2234         /* Allocate memory for output ring */
2235         job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2236                                 SEC_DMA_MEM_OUTPUT_RING_SIZE);
2237         memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
2238
2239         /* Reset job ring in SEC hw and configure job ring registers */
2240         ret = hw_reset_job_ring(job_ring);
2241         if (ret != 0) {
2242                 CAAM_JR_ERR("Failed to reset hardware job ring");
2243                 goto cleanup;
2244         }
2245
2246         if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
2247         /* When SEC US driver works in NAPI mode, the UA can select
2248          * if the driver starts with IRQs on or off.
2249          */
2250                 if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
2251                         CAAM_JR_INFO("Enabling DONE IRQ generationon job ring - %p",
2252                                 job_ring);
2253                         ret = caam_jr_enable_irqs(job_ring->irq_fd);
2254                         if (ret != 0) {
2255                                 CAAM_JR_ERR("Failed to enable irqs for job ring");
2256                                 goto cleanup;
2257                         }
2258                 }
2259         } else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
2260         /* When SEC US driver works in pure interrupt mode,
2261          * IRQ's are always enabled.
2262          */
2263                 CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2264                          job_ring);
2265                 ret = caam_jr_enable_irqs(job_ring->irq_fd);
2266                 if (ret != 0) {
2267                         CAAM_JR_ERR("Failed to enable irqs for job ring");
2268                         goto cleanup;
2269                 }
2270         }
2271         if (irq_coalescing_timer || irq_coalescing_count) {
2272                 hw_job_ring_set_coalescing_param(job_ring,
2273                          irq_coalescing_timer,
2274                          irq_coalescing_count);
2275
2276                 hw_job_ring_enable_coalescing(job_ring);
2277                 job_ring->coalescing_en = 1;
2278         }
2279
2280         job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
2281         job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
2282         job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
2283
2284         return job_ring;
2285 cleanup:
2286         caam_jr_dma_free(job_ring->output_ring);
2287         caam_jr_dma_free(job_ring->input_ring);
2288         return NULL;
2289 }
2290
2291
2292 static int
2293 caam_jr_dev_init(const char *name,
2294                  struct rte_vdev_device *vdev,
2295                  struct rte_cryptodev_pmd_init_params *init_params)
2296 {
2297         struct rte_cryptodev *dev;
2298         struct rte_security_ctx *security_instance;
2299         struct uio_job_ring *job_ring;
2300         char str[RTE_CRYPTODEV_NAME_MAX_LEN];
2301
2302         PMD_INIT_FUNC_TRACE();
2303
2304         /* Validate driver state */
2305         if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
2306                 g_job_rings_max = sec_configure();
2307                 if (!g_job_rings_max) {
2308                         CAAM_JR_ERR("No job ring detected on UIO !!!!");
2309                         return -1;
2310                 }
2311                 /* Update driver state */
2312                 g_driver_state = SEC_DRIVER_STATE_STARTED;
2313         }
2314
2315         if (g_job_rings_no >= g_job_rings_max) {
2316                 CAAM_JR_ERR("No more job rings available max=%d!!!!",
2317                                 g_job_rings_max);
2318                 return -1;
2319         }
2320
2321         job_ring = config_job_ring();
2322         if (job_ring == NULL) {
2323                 CAAM_JR_ERR("failed to create job ring");
2324                 goto init_error;
2325         }
2326
2327         snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
2328
2329         dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
2330         if (dev == NULL) {
2331                 CAAM_JR_ERR("failed to create cryptodev vdev");
2332                 goto cleanup;
2333         }
2334         /*TODO free it during teardown*/
2335         dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
2336                                                 job_ring->uio_fd);
2337
2338         if (!dev->data->dev_private) {
2339                 CAAM_JR_ERR("Ring memory allocation failed\n");
2340                 goto cleanup2;
2341         }
2342
2343         dev->driver_id = cryptodev_driver_id;
2344         dev->dev_ops = &caam_jr_ops;
2345
2346         /* register rx/tx burst functions for data path */
2347         dev->dequeue_burst = caam_jr_dequeue_burst;
2348         dev->enqueue_burst = caam_jr_enqueue_burst;
2349         dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2350                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
2351                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2352                         RTE_CRYPTODEV_FF_SECURITY |
2353                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2354                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2355                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2356                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2357                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2358
2359         /* For secondary processes, we don't initialise any further as primary
2360          * has already done this work. Only check we don't need a different
2361          * RX function
2362          */
2363         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2364                 CAAM_JR_WARN("Device already init by primary process");
2365                 return 0;
2366         }
2367
2368         /*TODO free it during teardown*/
2369         security_instance = rte_malloc("caam_jr",
2370                                 sizeof(struct rte_security_ctx), 0);
2371         if (security_instance == NULL) {
2372                 CAAM_JR_ERR("memory allocation failed\n");
2373                 //todo error handling.
2374                 goto cleanup2;
2375         }
2376
2377         security_instance->device = (void *)dev;
2378         security_instance->ops = &caam_jr_security_ops;
2379         security_instance->sess_cnt = 0;
2380         dev->security_ctx = security_instance;
2381
2382         RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name);
2383
2384         return 0;
2385
2386 cleanup2:
2387         caam_jr_dev_uninit(dev);
2388         rte_cryptodev_pmd_release_device(dev);
2389 cleanup:
2390         free_job_ring(job_ring->uio_fd);
2391 init_error:
2392         CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
2393                         init_params->name);
2394
2395         return -ENXIO;
2396 }
2397
2398 /** Initialise CAAM JR crypto device */
2399 static int
2400 cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
2401 {
2402         struct rte_cryptodev_pmd_init_params init_params = {
2403                 "",
2404                 sizeof(struct sec_job_ring_t),
2405                 rte_socket_id(),
2406                 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
2407         };
2408         const char *name;
2409         const char *input_args;
2410
2411         name = rte_vdev_device_name(vdev);
2412         if (name == NULL)
2413                 return -EINVAL;
2414
2415         input_args = rte_vdev_device_args(vdev);
2416         rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
2417
2418         /* if sec device version is not configured */
2419         if (!rta_get_sec_era()) {
2420                 const struct device_node *caam_node;
2421
2422                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2423                         const uint32_t *prop = of_get_property(caam_node,
2424                                         "fsl,sec-era",
2425                                         NULL);
2426                         if (prop) {
2427                                 rta_set_sec_era(
2428                                         INTL_SEC_ERA(cpu_to_caam32(*prop)));
2429                                 break;
2430                         }
2431                 }
2432         }
2433 #ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
2434         if (rta_get_sec_era() > RTA_SEC_ERA_8) {
2435                 RTE_LOG(ERR, PMD,
2436                 "CAAM is compiled in BE mode for device with sec era > 8???\n");
2437                 return -EINVAL;
2438         }
2439 #endif
2440
2441         return caam_jr_dev_init(name, vdev, &init_params);
2442 }
2443
2444 /** Uninitialise CAAM JR crypto device */
2445 static int
2446 cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
2447 {
2448         struct rte_cryptodev *cryptodev;
2449         const char *name;
2450
2451         name = rte_vdev_device_name(vdev);
2452         if (name == NULL)
2453                 return -EINVAL;
2454
2455         cryptodev = rte_cryptodev_pmd_get_named_dev(name);
2456         if (cryptodev == NULL)
2457                 return -ENODEV;
2458
2459         caam_jr_dev_uninit(cryptodev);
2460
2461         return rte_cryptodev_pmd_destroy(cryptodev);
2462 }
2463
/* vdev driver hooks: called by the vdev bus on device add/remove */
static struct rte_vdev_driver cryptodev_caam_jr_drv = {
	.probe = cryptodev_caam_jr_probe,
	.remove = cryptodev_caam_jr_remove
};

/* Driver handle passed to the cryptodev registration macro below */
static struct cryptodev_driver caam_jr_crypto_drv;

/* Register the driver under the "crypto_caam_jr" vdev name */
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
/* Advertise the devargs this PMD accepts (parsed in probe) */
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
	"max_nb_queue_pairs=<int>"
	"socket_id=<int>");
/* Obtain the driver id stored in each created cryptodev (driver_id) */
RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
		cryptodev_driver_id);
2477
2478 RTE_INIT(caam_jr_init_log)
2479 {
2480         caam_jr_logtype = rte_log_register("pmd.crypto.caam");
2481         if (caam_jr_logtype >= 0)
2482                 rte_log_set_level(caam_jr_logtype, RTE_LOG_NOTICE);
2483 }