crypto/dpaa_sec: support PDCP short MAC-I
drivers/crypto/dpaa_sec/dpaa_sec.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2021 NXP
5  *
6  */
7
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIB_SECURITY
19 #include <rte_security_driver.h>
20 #endif
21 #include <rte_cycles.h>
22 #include <rte_dev.h>
23 #include <rte_ip.h>
24 #include <rte_kvargs.h>
25 #include <rte_malloc.h>
26 #include <rte_mbuf.h>
27 #include <rte_memcpy.h>
28 #include <rte_string_fns.h>
29 #include <rte_spinlock.h>
30
31 #include <fsl_usd.h>
32 #include <fsl_qman.h>
33 #include <dpaa_of.h>
34
35 /* RTA header files */
36 #include <desc/common.h>
37 #include <desc/algo.h>
38 #include <desc/ipsec.h>
39 #include <desc/pdcp.h>
40 #include <desc/sdap.h>
41
42 #include <rte_dpaa_bus.h>
43 #include <dpaa_sec.h>
44 #include <dpaa_sec_event.h>
45 #include <dpaa_sec_log.h>
46 #include <dpaax_iova_table.h>
47
48 static uint8_t cryptodev_driver_id;
49
50 static int
51 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
52
53 static inline void
54 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
55 {
56         if (!ctx->fd_status) {
57                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
58         } else {
59                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
60                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
61         }
62 }
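/*
 * fd_status is the raw status word SEC/QMan wrote into the frame
 * descriptor: zero means success, and any non-zero value (for example
 * an ICV comparison failure on decrypt) is mapped to
 * RTE_CRYPTO_OP_STATUS_ERROR.
 */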
63
64 static inline struct dpaa_sec_op_ctx *
65 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
66 {
67         struct dpaa_sec_op_ctx *ctx;
68         int i, retval;
69
70         retval = rte_mempool_get(
71                         ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
72                         (void **)(&ctx));
73         if (!ctx || retval) {
74                 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
75                 return NULL;
76         }
77         /*
78          * Clear SG memory. There are 16 SG entries of 16 bytes each.
79          * One call to dcbz_64() clears 64 bytes, so it is called 4 times
80          * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
81          * for each packet, memset() would be costlier than dcbz_64().
82          */
83         for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
84                 dcbz_64(&ctx->job.sg[i]);
85
86         ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
87         ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
88
89         return ctx;
90 }
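/*
 * The ctx carries the SG job table, the op pointer and a scratch digest
 * buffer. vtop_offset caches the virtual-to-IOVA delta of the ctx so that
 * addresses inside it can be translated with a plain subtraction on the
 * hot path. The per-lcore qp selection assumes no more than
 * MAX_DPAA_CORES lcores drive a given session.
 */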
91
92 static void
93 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
94                    struct qman_fq *fq,
95                    const struct qm_mr_entry *msg)
96 {
97         DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
98                         fq->fqid, msg->ern.rc, msg->ern.seqnum);
99 }
100
101 /* Initialize the queue with the CAAM channel as destination so that
102  * all packets enqueued on this queue are dispatched to CAAM.
103  */
104 static int
105 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
106                  uint32_t fqid_out)
107 {
108         struct qm_mcc_initfq fq_opts;
109         uint32_t flags;
110         int ret = -1;
111
112         /* Clear FQ options */
113         memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
114
115         flags = QMAN_INITFQ_FLAG_SCHED;
116         fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
117                           QM_INITFQ_WE_CONTEXTB;
118
119         qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
120         fq_opts.fqd.context_b = fqid_out;
121         fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
122         fq_opts.fqd.dest.wq = 0;
123
124         fq_in->cb.ern  = ern_sec_fq_handler;
125
126         DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
127
128         ret = qman_init_fq(fq_in, flags, &fq_opts);
129         if (unlikely(ret != 0))
130                 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
131
132         return ret;
133 }
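/*
 * In the FQ descriptor programmed above, CONTEXT_A carries the IOVA of
 * the session's shared descriptor and CONTEXT_B the FQID on which CAAM
 * enqueues the results; this is what ties a session's input FQ to its
 * output FQ.
 */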
134
135 /* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
136 static enum qman_cb_dqrr_result
137 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
138                   struct qman_fq *fq __always_unused,
139                   const struct qm_dqrr_entry *dqrr)
140 {
141         const struct qm_fd *fd;
142         struct dpaa_sec_job *job;
143         struct dpaa_sec_op_ctx *ctx;
144
145         if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
146                 return qman_cb_dqrr_defer;
147
148         if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
149                 return qman_cb_dqrr_consume;
150
151         fd = &dqrr->fd;
152         /* sg is embedded in an op ctx,
153          * sg[0] is for output
154          * sg[1] for input
155          */
156         job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
157
158         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
159         ctx->fd_status = fd->status;
160         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
161                 struct qm_sg_entry *sg_out;
162                 uint32_t len;
163                 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
164                                 ctx->op->sym->m_src : ctx->op->sym->m_dst;
165
166                 sg_out = &job->sg[0];
167                 hw_sg_to_cpu(sg_out);
168                 len = sg_out->length;
169                 mbuf->pkt_len = len;
170                 while (mbuf->next != NULL) {
171                         len -= mbuf->data_len;
172                         mbuf = mbuf->next;
173                 }
174                 mbuf->data_len = len;
175         }
176         DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
177         dpaa_sec_op_ending(ctx);
178
179         return qman_cb_dqrr_consume;
180 }
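/*
 * For security (protocol offload) sessions SEC may grow or shrink the
 * frame (e.g. IPsec headers, PDCP MAC-I), so the mbuf chain lengths are
 * refreshed above from the output SG entry instead of being trusted
 * from the submitted mbuf.
 */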
181
182 /* caam result is put into this queue */
183 static int
184 dpaa_sec_init_tx(struct qman_fq *fq)
185 {
186         int ret;
187         struct qm_mcc_initfq opts;
188         uint32_t flags;
189
190         flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
191                 QMAN_FQ_FLAG_DYNAMIC_FQID;
192
193         ret = qman_create_fq(0, flags, fq);
194         if (unlikely(ret)) {
195                 DPAA_SEC_ERR("qman_create_fq failed");
196                 return ret;
197         }
198
199         memset(&opts, 0, sizeof(opts));
200         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
201                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
202
203         /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
204
205         fq->cb.dqrr = dqrr_out_fq_cb_rx;
206         fq->cb.ern  = ern_sec_fq_handler;
207
208         ret = qman_init_fq(fq, 0, &opts);
209         if (unlikely(ret)) {
210                 DPAA_SEC_ERR("unable to init caam source fq!");
211                 return ret;
212         }
213
214         return ret;
215 }
216
217 static inline int is_aead(dpaa_sec_session *ses)
218 {
219         return ((ses->cipher_alg == 0) &&
220                 (ses->auth_alg == 0) &&
221                 (ses->aead_alg != 0));
222 }
223
224 static inline int is_encode(dpaa_sec_session *ses)
225 {
226         return ses->dir == DIR_ENC;
227 }
228
229 static inline int is_decode(dpaa_sec_session *ses)
230 {
231         return ses->dir == DIR_DEC;
232 }
233
234 #ifdef RTE_LIB_SECURITY
235 static int
236 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
237 {
238         struct alginfo authdata = {0}, cipherdata = {0};
239         struct sec_cdb *cdb = &ses->cdb;
240         struct alginfo *p_authdata = NULL;
241         int32_t shared_desc_len = 0;
242 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
243         int swap = false;
244 #else
245         int swap = true;
246 #endif
247
248         cipherdata.key = (size_t)ses->cipher_key.data;
249         cipherdata.keylen = ses->cipher_key.length;
250         cipherdata.key_enc_flags = 0;
251         cipherdata.key_type = RTA_DATA_IMM;
252         cipherdata.algtype = ses->cipher_key.alg;
253         cipherdata.algmode = ses->cipher_key.algmode;
254
255         if (ses->auth_alg) {
256                 authdata.key = (size_t)ses->auth_key.data;
257                 authdata.keylen = ses->auth_key.length;
258                 authdata.key_enc_flags = 0;
259                 authdata.key_type = RTA_DATA_IMM;
260                 authdata.algtype = ses->auth_key.alg;
261                 authdata.algmode = ses->auth_key.algmode;
262
263                 p_authdata = &authdata;
264         }
265
266         if (ses->pdcp.sdap_enabled) {
267                 int nb_keys_to_inline =
268                                 rta_inline_pdcp_sdap_query(authdata.algtype,
269                                         cipherdata.algtype,
270                                         ses->pdcp.sn_size,
271                                         ses->pdcp.hfn_ovd);
272                 if (nb_keys_to_inline >= 1) {
273                         cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
274                                                 (size_t)cipherdata.key);
275                         cipherdata.key_type = RTA_DATA_PTR;
276                 }
277                 if (nb_keys_to_inline >= 2) {
278                         authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
279                                                 (size_t)authdata.key);
280                         authdata.key_type = RTA_DATA_PTR;
281                 }
282         } else {
283                 if (rta_inline_pdcp_query(authdata.algtype,
284                                         cipherdata.algtype,
285                                         ses->pdcp.sn_size,
286                                         ses->pdcp.hfn_ovd)) {
287                         cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
288                                                 (size_t)cipherdata.key);
289                         cipherdata.key_type = RTA_DATA_PTR;
290                 }
291         }
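        /*
         * PDCP descriptors are large, so rta_inline_pdcp[_sdap]_query()
         * decides how many keys can remain immediate in the shared
         * descriptor; keys that do not fit are converted above to IOVA
         * references (RTA_DATA_PTR).
         */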
292
293         if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
294                 if (ses->dir == DIR_ENC)
295                         shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
296                                         cdb->sh_desc, 1, swap,
297                                         ses->pdcp.hfn,
298                                         ses->pdcp.sn_size,
299                                         ses->pdcp.bearer,
300                                         ses->pdcp.pkt_dir,
301                                         ses->pdcp.hfn_threshold,
302                                         &cipherdata, &authdata,
303                                         0);
304                 else if (ses->dir == DIR_DEC)
305                         shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
306                                         cdb->sh_desc, 1, swap,
307                                         ses->pdcp.hfn,
308                                         ses->pdcp.sn_size,
309                                         ses->pdcp.bearer,
310                                         ses->pdcp.pkt_dir,
311                                         ses->pdcp.hfn_threshold,
312                                         &cipherdata, &authdata,
313                                         0);
314         } else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
315                 shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
316                                                      1, swap, &authdata);
317         } else {
318                 if (ses->dir == DIR_ENC) {
319                         if (ses->pdcp.sdap_enabled)
320                                 shared_desc_len =
321                                         cnstr_shdsc_pdcp_sdap_u_plane_encap(
322                                                 cdb->sh_desc, 1, swap,
323                                                 ses->pdcp.sn_size,
324                                                 ses->pdcp.hfn,
325                                                 ses->pdcp.bearer,
326                                                 ses->pdcp.pkt_dir,
327                                                 ses->pdcp.hfn_threshold,
328                                                 &cipherdata, p_authdata, 0);
329                         else
330                                 shared_desc_len =
331                                         cnstr_shdsc_pdcp_u_plane_encap(
332                                                 cdb->sh_desc, 1, swap,
333                                                 ses->pdcp.sn_size,
334                                                 ses->pdcp.hfn,
335                                                 ses->pdcp.bearer,
336                                                 ses->pdcp.pkt_dir,
337                                                 ses->pdcp.hfn_threshold,
338                                                 &cipherdata, p_authdata, 0);
339                 } else if (ses->dir == DIR_DEC) {
340                         if (ses->pdcp.sdap_enabled)
341                                 shared_desc_len =
342                                         cnstr_shdsc_pdcp_sdap_u_plane_decap(
343                                                 cdb->sh_desc, 1, swap,
344                                                 ses->pdcp.sn_size,
345                                                 ses->pdcp.hfn,
346                                                 ses->pdcp.bearer,
347                                                 ses->pdcp.pkt_dir,
348                                                 ses->pdcp.hfn_threshold,
349                                                 &cipherdata, p_authdata, 0);
350                         else
351                                 shared_desc_len =
352                                         cnstr_shdsc_pdcp_u_plane_decap(
353                                                 cdb->sh_desc, 1, swap,
354                                                 ses->pdcp.sn_size,
355                                                 ses->pdcp.hfn,
356                                                 ses->pdcp.bearer,
357                                                 ses->pdcp.pkt_dir,
358                                                 ses->pdcp.hfn_threshold,
359                                                 &cipherdata, p_authdata, 0);
360                 }
361         }
362         return shared_desc_len;
363 }
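/*
 * Note on the short MAC-I case above: RTE_SECURITY_PDCP_MODE_SHORT_MAC
 * is integrity-only, so only authdata is handed to
 * cnstr_shdsc_pdcp_short_mac(); the cipher context is not used in that
 * descriptor.
 */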
364
365 /* prepare ipsec proto command block of the session */
366 static int
367 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
368 {
369         struct alginfo cipherdata = {0}, authdata = {0};
370         struct sec_cdb *cdb = &ses->cdb;
371         int32_t shared_desc_len = 0;
372         int err;
373 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
374         int swap = false;
375 #else
376         int swap = true;
377 #endif
378
379         cipherdata.key = (size_t)ses->cipher_key.data;
380         cipherdata.keylen = ses->cipher_key.length;
381         cipherdata.key_enc_flags = 0;
382         cipherdata.key_type = RTA_DATA_IMM;
383         cipherdata.algtype = ses->cipher_key.alg;
384         cipherdata.algmode = ses->cipher_key.algmode;
385
386         if (ses->auth_key.length) {
387                 authdata.key = (size_t)ses->auth_key.data;
388                 authdata.keylen = ses->auth_key.length;
389                 authdata.key_enc_flags = 0;
390                 authdata.key_type = RTA_DATA_IMM;
391                 authdata.algtype = ses->auth_key.alg;
392                 authdata.algmode = ses->auth_key.algmode;
393         }
394
395         cdb->sh_desc[0] = cipherdata.keylen;
396         cdb->sh_desc[1] = authdata.keylen;
397         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
398                                DESC_JOB_IO_LEN,
399                                (unsigned int *)cdb->sh_desc,
400                                &cdb->sh_desc[2], 2);
401
402         if (err < 0) {
403                 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
404                 return err;
405         }
406         if (cdb->sh_desc[2] & 1)
407                 cipherdata.key_type = RTA_DATA_IMM;
408         else {
409                 cipherdata.key = (size_t)rte_dpaa_mem_vtop(
410                                         (void *)(size_t)cipherdata.key);
411                 cipherdata.key_type = RTA_DATA_PTR;
412         }
413         if (cdb->sh_desc[2] & (1<<1))
414                 authdata.key_type = RTA_DATA_IMM;
415         else {
416                 authdata.key = (size_t)rte_dpaa_mem_vtop(
417                                         (void *)(size_t)authdata.key);
418                 authdata.key_type = RTA_DATA_PTR;
419         }
420
421         cdb->sh_desc[0] = 0;
422         cdb->sh_desc[1] = 0;
423         cdb->sh_desc[2] = 0;
424         if (ses->dir == DIR_ENC) {
425                 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
426                                 cdb->sh_desc,
427                                 true, swap, SHR_SERIAL,
428                                 &ses->encap_pdb,
429                                 (uint8_t *)&ses->ip4_hdr,
430                                 &cipherdata, &authdata);
431         } else if (ses->dir == DIR_DEC) {
432                 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
433                                 cdb->sh_desc,
434                                 true, swap, SHR_SERIAL,
435                                 &ses->decap_pdb,
436                                 &cipherdata, &authdata);
437         }
438         return shared_desc_len;
439 }
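/*
 * Note: rta_inline_query() reads the two key lengths from sh_desc[0..1]
 * and returns a bitmask in sh_desc[2] (bit 0: cipher key fits inline,
 * bit 1: auth key fits inline). Keys that do not fit are passed by IOVA
 * reference instead, and the scratch words are zeroed again before the
 * real descriptor is built.
 */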
440 #endif
441 /* prepare command block of the session */
442 static int
443 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
444 {
445         struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
446         int32_t shared_desc_len = 0;
447         struct sec_cdb *cdb = &ses->cdb;
448         int err;
449 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
450         int swap = false;
451 #else
452         int swap = true;
453 #endif
454
455         memset(cdb, 0, sizeof(struct sec_cdb));
456
457         switch (ses->ctxt) {
458 #ifdef RTE_LIB_SECURITY
459         case DPAA_SEC_IPSEC:
460                 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
461                 break;
462         case DPAA_SEC_PDCP:
463                 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
464                 break;
465 #endif
466         case DPAA_SEC_CIPHER:
467                 alginfo_c.key = (size_t)ses->cipher_key.data;
468                 alginfo_c.keylen = ses->cipher_key.length;
469                 alginfo_c.key_enc_flags = 0;
470                 alginfo_c.key_type = RTA_DATA_IMM;
471                 alginfo_c.algtype = ses->cipher_key.alg;
472                 alginfo_c.algmode = ses->cipher_key.algmode;
473
474                 switch (ses->cipher_alg) {
475                 case RTE_CRYPTO_CIPHER_AES_CBC:
476                 case RTE_CRYPTO_CIPHER_3DES_CBC:
477                 case RTE_CRYPTO_CIPHER_DES_CBC:
478                 case RTE_CRYPTO_CIPHER_AES_CTR:
479                 case RTE_CRYPTO_CIPHER_3DES_CTR:
480                         shared_desc_len = cnstr_shdsc_blkcipher(
481                                         cdb->sh_desc, true,
482                                         swap, SHR_NEVER, &alginfo_c,
483                                         ses->iv.length,
484                                         ses->dir);
485                         break;
486                 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
487                         shared_desc_len = cnstr_shdsc_snow_f8(
488                                         cdb->sh_desc, true, swap,
489                                         &alginfo_c,
490                                         ses->dir);
491                         break;
492                 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
493                         shared_desc_len = cnstr_shdsc_zuce(
494                                         cdb->sh_desc, true, swap,
495                                         &alginfo_c,
496                                         ses->dir);
497                         break;
498                 default:
499                         DPAA_SEC_ERR("unsupported cipher alg %d",
500                                      ses->cipher_alg);
501                         return -ENOTSUP;
502                 }
503                 break;
504         case DPAA_SEC_AUTH:
505                 alginfo_a.key = (size_t)ses->auth_key.data;
506                 alginfo_a.keylen = ses->auth_key.length;
507                 alginfo_a.key_enc_flags = 0;
508                 alginfo_a.key_type = RTA_DATA_IMM;
509                 alginfo_a.algtype = ses->auth_key.alg;
510                 alginfo_a.algmode = ses->auth_key.algmode;
511                 switch (ses->auth_alg) {
512                 case RTE_CRYPTO_AUTH_MD5:
513                 case RTE_CRYPTO_AUTH_SHA1:
514                 case RTE_CRYPTO_AUTH_SHA224:
515                 case RTE_CRYPTO_AUTH_SHA256:
516                 case RTE_CRYPTO_AUTH_SHA384:
517                 case RTE_CRYPTO_AUTH_SHA512:
518                         shared_desc_len = cnstr_shdsc_hash(
519                                                 cdb->sh_desc, true,
520                                                 swap, SHR_NEVER, &alginfo_a,
521                                                 !ses->dir,
522                                                 ses->digest_length);
523                         break;
524                 case RTE_CRYPTO_AUTH_MD5_HMAC:
525                 case RTE_CRYPTO_AUTH_SHA1_HMAC:
526                 case RTE_CRYPTO_AUTH_SHA224_HMAC:
527                 case RTE_CRYPTO_AUTH_SHA256_HMAC:
528                 case RTE_CRYPTO_AUTH_SHA384_HMAC:
529                 case RTE_CRYPTO_AUTH_SHA512_HMAC:
530                         shared_desc_len = cnstr_shdsc_hmac(
531                                                 cdb->sh_desc, true,
532                                                 swap, SHR_NEVER, &alginfo_a,
533                                                 !ses->dir,
534                                                 ses->digest_length);
535                         break;
536                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
537                         shared_desc_len = cnstr_shdsc_snow_f9(
538                                                 cdb->sh_desc, true, swap,
539                                                 &alginfo_a,
540                                                 !ses->dir,
541                                                 ses->digest_length);
542                         break;
543                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
544                         shared_desc_len = cnstr_shdsc_zuca(
545                                                 cdb->sh_desc, true, swap,
546                                                 &alginfo_a,
547                                                 !ses->dir,
548                                                 ses->digest_length);
549                         break;
550                 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
551                 case RTE_CRYPTO_AUTH_AES_CMAC:
552                         shared_desc_len = cnstr_shdsc_aes_mac(
553                                                 cdb->sh_desc,
554                                                 true, swap, SHR_NEVER,
555                                                 &alginfo_a,
556                                                 !ses->dir,
557                                                 ses->digest_length);
558                         break;
559                 default:
560                         DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
                            return -ENOTSUP;
561                 }
562                 break;
563         case DPAA_SEC_AEAD:
564                 if (ses->aead_key.alg == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
565                         DPAA_SEC_ERR("unsupported aead alg");
566                         return -ENOTSUP;
567                 }
568                 alginfo.key = (size_t)ses->aead_key.data;
569                 alginfo.keylen = ses->aead_key.length;
570                 alginfo.key_enc_flags = 0;
571                 alginfo.key_type = RTA_DATA_IMM;
572                 alginfo.algtype = ses->aead_key.alg;
573                 alginfo.algmode = ses->aead_key.algmode;
574
575                 if (ses->dir == DIR_ENC)
576                         shared_desc_len = cnstr_shdsc_gcm_encap(
577                                         cdb->sh_desc, true, swap, SHR_NEVER,
578                                         &alginfo,
579                                         ses->iv.length,
580                                         ses->digest_length);
581                 else
582                         shared_desc_len = cnstr_shdsc_gcm_decap(
583                                         cdb->sh_desc, true, swap, SHR_NEVER,
584                                         &alginfo,
585                                         ses->iv.length,
586                                         ses->digest_length);
587                 break;
588         case DPAA_SEC_CIPHER_HASH:
589                 alginfo_c.key = (size_t)ses->cipher_key.data;
590                 alginfo_c.keylen = ses->cipher_key.length;
591                 alginfo_c.key_enc_flags = 0;
592                 alginfo_c.key_type = RTA_DATA_IMM;
593                 alginfo_c.algtype = ses->cipher_key.alg;
594                 alginfo_c.algmode = ses->cipher_key.algmode;
595
596                 alginfo_a.key = (size_t)ses->auth_key.data;
597                 alginfo_a.keylen = ses->auth_key.length;
598                 alginfo_a.key_enc_flags = 0;
599                 alginfo_a.key_type = RTA_DATA_IMM;
600                 alginfo_a.algtype = ses->auth_key.alg;
601                 alginfo_a.algmode = ses->auth_key.algmode;
602
603                 cdb->sh_desc[0] = alginfo_c.keylen;
604                 cdb->sh_desc[1] = alginfo_a.keylen;
605                 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
606                                        DESC_JOB_IO_LEN,
607                                        (unsigned int *)cdb->sh_desc,
608                                        &cdb->sh_desc[2], 2);
609
610                 if (err < 0) {
611                         DPAA_SEC_ERR("Crypto: Incorrect key lengths");
612                         return err;
613                 }
614                 if (cdb->sh_desc[2] & 1)
615                         alginfo_c.key_type = RTA_DATA_IMM;
616                 else {
617                         alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
618                                                 (void *)(size_t)alginfo_c.key);
619                         alginfo_c.key_type = RTA_DATA_PTR;
620                 }
621                 if (cdb->sh_desc[2] & (1<<1))
622                         alginfo_a.key_type = RTA_DATA_IMM;
623                 else {
624                         alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
625                                                 (void *)(size_t)alginfo_a.key);
626                         alginfo_a.key_type = RTA_DATA_PTR;
627                 }
628                 cdb->sh_desc[0] = 0;
629                 cdb->sh_desc[1] = 0;
630                 cdb->sh_desc[2] = 0;
631                 /* Auth_only_len is set as 0 here and it will be
632                  * overwritten in fd for each packet.
633                  */
634                 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
635                                 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
636                                 ses->iv.length,
637                                 ses->digest_length, ses->dir);
638                 break;
639         case DPAA_SEC_HASH_CIPHER:
640         default:
641                 DPAA_SEC_ERR("error: Unsupported session");
642                 return -ENOTSUP;
643         }
644
645         if (shared_desc_len < 0) {
646                 DPAA_SEC_ERR("error in preparing command block");
647                 return shared_desc_len;
648         }
649
650         cdb->sh_hdr.hi.field.idlen = shared_desc_len;
651         cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
652         cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
653
654         return 0;
655 }
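/*
 * The shared descriptor header words are stored big endian because CAAM
 * parses the CDB in big-endian format irrespective of the core's byte
 * order; the descriptor body itself was built with the matching 'swap'
 * setting chosen from RTE_BYTE_ORDER above.
 */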
656
657 /* qp is lockless, should be accessed by only one thread */
658 static int
659 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
660 {
661         struct qman_fq *fq;
662         unsigned int pkts = 0;
663         int num_rx_bufs, ret;
664         struct qm_dqrr_entry *dq;
665         uint32_t vdqcr_flags = 0;
666
667         fq = &qp->outq;
668         /*
669          * For requests of fewer than four buffers, we ask for the exact
670          * number of buffers (QM_VDQCR_EXACT). Otherwise we do not set the
671          * QM_VDQCR_EXACT flag, which may deliver up to two more buffers
672          * than requested, so we request two fewer in that case.
673          */
674         if (nb_ops < 4) {
675                 vdqcr_flags = QM_VDQCR_EXACT;
676                 num_rx_bufs = nb_ops;
677         } else {
678                 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
679                         (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
680         }
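        /*
         * Example: nb_ops = 32 becomes a volatile dequeue of 30 frames
         * without QM_VDQCR_EXACT (so up to 32 may arrive), while
         * nb_ops = 3 asks for exactly 3 with QM_VDQCR_EXACT set
         * (assuming DPAA_MAX_DEQUEUE_NUM_FRAMES >= 32).
         */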
681         ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
682         if (ret)
683                 return 0;
684
685         do {
686                 const struct qm_fd *fd;
687                 struct dpaa_sec_job *job;
688                 struct dpaa_sec_op_ctx *ctx;
689                 struct rte_crypto_op *op;
690
691                 dq = qman_dequeue(fq);
692                 if (!dq)
693                         continue;
694
695                 fd = &dq->fd;
696                 /* sg is embedded in an op ctx,
697                  * sg[0] is for output
698                  * sg[1] for input
699                  */
700                 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
701
702                 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
703                 ctx->fd_status = fd->status;
704                 op = ctx->op;
705                 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
706                         struct qm_sg_entry *sg_out;
707                         uint32_t len;
708                         struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
709                                                 op->sym->m_src : op->sym->m_dst;
710
711                         sg_out = &job->sg[0];
712                         hw_sg_to_cpu(sg_out);
713                         len = sg_out->length;
714                         mbuf->pkt_len = len;
715                         while (mbuf->next != NULL) {
716                                 len -= mbuf->data_len;
717                                 mbuf = mbuf->next;
718                         }
719                         mbuf->data_len = len;
720                 }
721                 if (!ctx->fd_status) {
722                         op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
723                 } else {
724                         DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
725                         op->status = RTE_CRYPTO_OP_STATUS_ERROR;
726                 }
727                 ops[pkts++] = op;
728
729                 /* status already reported on the op; free the ctx memory */
730                 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
731
732                 qman_dqrr_consume(fq, dq);
733         } while (fq->flags & QMAN_FQ_STATE_VDQCR);
734
735         return pkts;
736 }
737
738 static inline struct dpaa_sec_job *
739 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
740 {
741         struct rte_crypto_sym_op *sym = op->sym;
742         struct rte_mbuf *mbuf = sym->m_src;
743         struct dpaa_sec_job *cf;
744         struct dpaa_sec_op_ctx *ctx;
745         struct qm_sg_entry *sg, *out_sg, *in_sg;
746         phys_addr_t start_addr;
747         uint8_t *old_digest, extra_segs;
748         int data_len, data_offset;
749
750         data_len = sym->auth.data.length;
751         data_offset = sym->auth.data.offset;
752
753         if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
754             ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
755                 if ((data_len & 7) || (data_offset & 7)) {
756                         DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
757                         return NULL;
758                 }
759
760                 data_len = data_len >> 3;
761                 data_offset = data_offset >> 3;
762         }
763
764         if (is_decode(ses))
765                 extra_segs = 3;
766         else
767                 extra_segs = 2;
768
769         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
770                 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
771                                 MAX_SG_ENTRIES);
772                 return NULL;
773         }
774         ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
775         if (!ctx)
776                 return NULL;
777
778         cf = &ctx->job;
779         ctx->op = op;
780         old_digest = ctx->digest;
781
782         /* output */
783         out_sg = &cf->sg[0];
784         qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
785         out_sg->length = ses->digest_length;
786         cpu_to_hw_sg(out_sg);
787
788         /* input */
789         in_sg = &cf->sg[1];
790         /* need to extend the input to a compound frame */
791         in_sg->extension = 1;
792         in_sg->final = 1;
793         in_sg->length = data_len;
794         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
795
796         /* 1st seg */
797         sg = in_sg + 1;
798
799         if (ses->iv.length) {
800                 uint8_t *iv_ptr;
801
802                 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
803                                                    ses->iv.offset);
804
805                 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
806                         iv_ptr = conv_to_snow_f9_iv(iv_ptr);
807                         sg->length = 12;
808                 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
809                         iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
810                         sg->length = 8;
811                 } else {
812                         sg->length = ses->iv.length;
813                 }
814                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
815                 in_sg->length += sg->length;
816                 cpu_to_hw_sg(sg);
817                 sg++;
818         }
819
820         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
821         sg->offset = data_offset;
822
823         if (data_len <= (mbuf->data_len - data_offset)) {
824                 sg->length = data_len;
825         } else {
826                 sg->length = mbuf->data_len - data_offset;
827
828                 /* remaining i/p segs */
829                 while ((data_len = data_len - sg->length) &&
830                        (mbuf = mbuf->next)) {
831                         cpu_to_hw_sg(sg);
832                         sg++;
833                         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
834                         if (data_len > mbuf->data_len)
835                                 sg->length = mbuf->data_len;
836                         else
837                                 sg->length = data_len;
838                 }
839         }
840
841         if (is_decode(ses)) {
842                 /* Digest verification case */
843                 cpu_to_hw_sg(sg);
844                 sg++;
845                 rte_memcpy(old_digest, sym->auth.digest.data,
846                                 ses->digest_length);
847                 start_addr = rte_dpaa_mem_vtop(old_digest);
848                 qm_sg_entry_set64(sg, start_addr);
849                 sg->length = ses->digest_length;
850                 in_sg->length += ses->digest_length;
851         }
852         sg->final = 1;
853         cpu_to_hw_sg(sg);
854         cpu_to_hw_sg(in_sg);
855
856         return cf;
857 }
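/*
 * For SNOW3G UIA2 / ZUC EIA3 the crypto API gives auth offset/length in
 * bits, hence the >>3 conversions above; the IV is likewise repacked by
 * conv_to_snow_f9_iv()/conv_to_zuc_eia_iv() into the 12- and 8-byte
 * layouts the SEC descriptors expect.
 */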
858
859 /**
860  * packet looks like:
861  *              |<----data_len------->|
862  *    |ip_header|ah_header|icv|payload|
863  *              ^
864  *              |
865  *         mbuf data pointer
866  */
867 static inline struct dpaa_sec_job *
868 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
869 {
870         struct rte_crypto_sym_op *sym = op->sym;
871         struct rte_mbuf *mbuf = sym->m_src;
872         struct dpaa_sec_job *cf;
873         struct dpaa_sec_op_ctx *ctx;
874         struct qm_sg_entry *sg, *in_sg;
875         rte_iova_t start_addr;
876         uint8_t *old_digest;
877         int data_len, data_offset;
878
879         data_len = sym->auth.data.length;
880         data_offset = sym->auth.data.offset;
881
882         if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
883             ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
884                 if ((data_len & 7) || (data_offset & 7)) {
885                         DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
886                         return NULL;
887                 }
888
889                 data_len = data_len >> 3;
890                 data_offset = data_offset >> 3;
891         }
892
893         ctx = dpaa_sec_alloc_ctx(ses, 4);
894         if (!ctx)
895                 return NULL;
896
897         cf = &ctx->job;
898         ctx->op = op;
899         old_digest = ctx->digest;
900
901         start_addr = rte_pktmbuf_iova(mbuf);
902         /* output */
903         sg = &cf->sg[0];
904         qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
905         sg->length = ses->digest_length;
906         cpu_to_hw_sg(sg);
907
908         /* input */
909         in_sg = &cf->sg[1];
910         /* need to extend the input to a compound frame */
911         in_sg->extension = 1;
912         in_sg->final = 1;
913         in_sg->length = data_len;
914         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
915         sg = &cf->sg[2];
916
917         if (ses->iv.length) {
918                 uint8_t *iv_ptr;
919
920                 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
921                                                    ses->iv.offset);
922
923                 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
924                         iv_ptr = conv_to_snow_f9_iv(iv_ptr);
925                         sg->length = 12;
926                 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
927                         iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
928                         sg->length = 8;
929                 } else {
930                         sg->length = ses->iv.length;
931                 }
932                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
933                 in_sg->length += sg->length;
934                 cpu_to_hw_sg(sg);
935                 sg++;
936         }
937
938         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
939         sg->offset = data_offset;
940         sg->length = data_len;
941
942         if (is_decode(ses)) {
943                 /* Digest verification case */
944                 cpu_to_hw_sg(sg);
945                 /* save the expected digest before passing it to HW */
946                 rte_memcpy(old_digest, sym->auth.digest.data,
947                                 ses->digest_length);
948                 /* let HW verify the digest */
949                 start_addr = rte_dpaa_mem_vtop(old_digest);
950                 sg++;
951                 qm_sg_entry_set64(sg, start_addr);
952                 sg->length = ses->digest_length;
953                 in_sg->length += ses->digest_length;
954         }
955         sg->final = 1;
956         cpu_to_hw_sg(sg);
957         cpu_to_hw_sg(in_sg);
958
959         return cf;
960 }
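/*
 * On decode the expected digest is copied into ctx->digest and chained
 * as the last input SG entry, letting SEC perform the ICV comparison in
 * hardware; a mismatch then shows up as a non-zero fd_status on the
 * output FQ.
 */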
961
962 static inline struct dpaa_sec_job *
963 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
964 {
965         struct rte_crypto_sym_op *sym = op->sym;
966         struct dpaa_sec_job *cf;
967         struct dpaa_sec_op_ctx *ctx;
968         struct qm_sg_entry *sg, *out_sg, *in_sg;
969         struct rte_mbuf *mbuf;
970         uint8_t req_segs;
971         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
972                         ses->iv.offset);
973         int data_len, data_offset;
974
975         data_len = sym->cipher.data.length;
976         data_offset = sym->cipher.data.offset;
977
978         if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
979                 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
980                 if ((data_len & 7) || (data_offset & 7)) {
981                         DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
982                         return NULL;
983                 }
984
985                 data_len = data_len >> 3;
986                 data_offset = data_offset >> 3;
987         }
988
989         if (sym->m_dst) {
990                 mbuf = sym->m_dst;
991                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
992         } else {
993                 mbuf = sym->m_src;
994                 req_segs = mbuf->nb_segs * 2 + 3;
995         }
996         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
997                 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
998                                 MAX_SG_ENTRIES);
999                 return NULL;
1000         }
1001
1002         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1003         if (!ctx)
1004                 return NULL;
1005
1006         cf = &ctx->job;
1007         ctx->op = op;
1008
1009         /* output */
1010         out_sg = &cf->sg[0];
1011         out_sg->extension = 1;
1012         out_sg->length = data_len;
1013         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1014         cpu_to_hw_sg(out_sg);
1015
1016         /* 1st seg */
1017         sg = &cf->sg[2];
1018         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1019         sg->length = mbuf->data_len - data_offset;
1020         sg->offset = data_offset;
1021
1022         /* Successive segs */
1023         mbuf = mbuf->next;
1024         while (mbuf) {
1025                 cpu_to_hw_sg(sg);
1026                 sg++;
1027                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1028                 sg->length = mbuf->data_len;
1029                 mbuf = mbuf->next;
1030         }
1031         sg->final = 1;
1032         cpu_to_hw_sg(sg);
1033
1034         /* input */
1035         mbuf = sym->m_src;
1036         in_sg = &cf->sg[1];
1037         in_sg->extension = 1;
1038         in_sg->final = 1;
1039         in_sg->length = data_len + ses->iv.length;
1040
1041         sg++;
1042         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1043         cpu_to_hw_sg(in_sg);
1044
1045         /* IV */
1046         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1047         sg->length = ses->iv.length;
1048         cpu_to_hw_sg(sg);
1049
1050         /* 1st seg */
1051         sg++;
1052         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1053         sg->length = mbuf->data_len - data_offset;
1054         sg->offset = data_offset;
1055
1056         /* Successive segs */
1057         mbuf = mbuf->next;
1058         while (mbuf) {
1059                 cpu_to_hw_sg(sg);
1060                 sg++;
1061                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1062                 sg->length = mbuf->data_len;
1063                 mbuf = mbuf->next;
1064         }
1065         sg->final = 1;
1066         cpu_to_hw_sg(sg);
1067
1068         return cf;
1069 }
1070
1071 static inline struct dpaa_sec_job *
1072 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1073 {
1074         struct rte_crypto_sym_op *sym = op->sym;
1075         struct dpaa_sec_job *cf;
1076         struct dpaa_sec_op_ctx *ctx;
1077         struct qm_sg_entry *sg;
1078         rte_iova_t src_start_addr, dst_start_addr;
1079         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1080                         ses->iv.offset);
1081         int data_len, data_offset;
1082
1083         data_len = sym->cipher.data.length;
1084         data_offset = sym->cipher.data.offset;
1085
1086         if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1087                 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1088                 if ((data_len & 7) || (data_offset & 7)) {
1089                         DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1090                         return NULL;
1091                 }
1092
1093                 data_len = data_len >> 3;
1094                 data_offset = data_offset >> 3;
1095         }
1096
1097         ctx = dpaa_sec_alloc_ctx(ses, 4);
1098         if (!ctx)
1099                 return NULL;
1100
1101         cf = &ctx->job;
1102         ctx->op = op;
1103
1104         src_start_addr = rte_pktmbuf_iova(sym->m_src);
1105
1106         if (sym->m_dst)
1107                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1108         else
1109                 dst_start_addr = src_start_addr;
1110
1111         /* output */
1112         sg = &cf->sg[0];
1113         qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1114         sg->length = data_len + ses->iv.length;
1115         cpu_to_hw_sg(sg);
1116
1117         /* input */
1118         sg = &cf->sg[1];
1119
1120         /* need to extend the input to a compound frame */
1121         sg->extension = 1;
1122         sg->final = 1;
1123         sg->length = data_len + ses->iv.length;
1124         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1125         cpu_to_hw_sg(sg);
1126
1127         sg = &cf->sg[2];
1128         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1129         sg->length = ses->iv.length;
1130         cpu_to_hw_sg(sg);
1131
1132         sg++;
1133         qm_sg_entry_set64(sg, src_start_addr + data_offset);
1134         sg->length = data_len;
1135         sg->final = 1;
1136         cpu_to_hw_sg(sg);
1137
1138         return cf;
1139 }
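/*
 * The simple (non-SG) cipher job above is a compound frame: sg[0] is
 * the output entry (dst + data_offset), sg[1] is an extension entry
 * whose table holds sg[2] (the IV) and sg[3] (the source data, marked
 * final). Both frame lengths include ses->iv.length because the IV
 * travels in-band with the data.
 */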
1140
1141 static inline struct dpaa_sec_job *
1142 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1143 {
1144         struct rte_crypto_sym_op *sym = op->sym;
1145         struct dpaa_sec_job *cf;
1146         struct dpaa_sec_op_ctx *ctx;
1147         struct qm_sg_entry *sg, *out_sg, *in_sg;
1148         struct rte_mbuf *mbuf;
1149         uint8_t req_segs;
1150         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1151                         ses->iv.offset);
1152
1153         if (sym->m_dst) {
1154                 mbuf = sym->m_dst;
1155                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1156         } else {
1157                 mbuf = sym->m_src;
1158                 req_segs = mbuf->nb_segs * 2 + 4;
1159         }
1160
1161         if (ses->auth_only_len)
1162                 req_segs++;
1163
1164         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1165                 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1166                                 MAX_SG_ENTRIES);
1167                 return NULL;
1168         }
1169
1170         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1171         if (!ctx)
1172                 return NULL;
1173
1174         cf = &ctx->job;
1175         ctx->op = op;
1176
1177         rte_prefetch0(cf->sg);
1178
1179         /* output */
1180         out_sg = &cf->sg[0];
1181         out_sg->extension = 1;
1182         if (is_encode(ses))
1183                 out_sg->length = sym->aead.data.length + ses->digest_length;
1184         else
1185                 out_sg->length = sym->aead.data.length;
1186
1187         /* output sg entries */
1188         sg = &cf->sg[2];
1189         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1190         cpu_to_hw_sg(out_sg);
1191
1192         /* 1st seg */
1193         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1194         sg->length = mbuf->data_len - sym->aead.data.offset;
1195         sg->offset = sym->aead.data.offset;
1196
1197         /* Successive segs */
1198         mbuf = mbuf->next;
1199         while (mbuf) {
1200                 cpu_to_hw_sg(sg);
1201                 sg++;
1202                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1203                 sg->length = mbuf->data_len;
1204                 mbuf = mbuf->next;
1205         }
1206         sg->length -= ses->digest_length;
1207
1208         if (is_encode(ses)) {
1209                 cpu_to_hw_sg(sg);
1210                 /* set auth output */
1211                 sg++;
1212                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1213                 sg->length = ses->digest_length;
1214         }
1215         sg->final = 1;
1216         cpu_to_hw_sg(sg);
1217
1218         /* input */
1219         mbuf = sym->m_src;
1220         in_sg = &cf->sg[1];
1221         in_sg->extension = 1;
1222         in_sg->final = 1;
1223         if (is_encode(ses))
1224                 in_sg->length = ses->iv.length + sym->aead.data.length
1225                                                         + ses->auth_only_len;
1226         else
1227                 in_sg->length = ses->iv.length + sym->aead.data.length
1228                                 + ses->auth_only_len + ses->digest_length;
1229
1230         /* input sg entries */
1231         sg++;
1232         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1233         cpu_to_hw_sg(in_sg);
1234
1235         /* 1st seg IV */
1236         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1237         sg->length = ses->iv.length;
1238         cpu_to_hw_sg(sg);
1239
1240         /* 2nd seg auth only */
1241         if (ses->auth_only_len) {
1242                 sg++;
1243                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1244                 sg->length = ses->auth_only_len;
1245                 cpu_to_hw_sg(sg);
1246         }
1247
1248         /* 3rd seg */
1249         sg++;
1250         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1251         sg->length = mbuf->data_len - sym->aead.data.offset;
1252         sg->offset = sym->aead.data.offset;
1253
1254         /* Successive segs */
1255         mbuf = mbuf->next;
1256         while (mbuf) {
1257                 cpu_to_hw_sg(sg);
1258                 sg++;
1259                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1260                 sg->length = mbuf->data_len;
1261                 mbuf = mbuf->next;
1262         }
1263
1264         if (is_decode(ses)) {
1265                 cpu_to_hw_sg(sg);
1266                 sg++;
1267                 memcpy(ctx->digest, sym->aead.digest.data,
1268                         ses->digest_length);
1269                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1270                 sg->length = ses->digest_length;
1271         }
1272         sg->final = 1;
1273         cpu_to_hw_sg(sg);
1274
1275         return cf;
1276 }
1277
1278 static inline struct dpaa_sec_job *
1279 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1280 {
1281         struct rte_crypto_sym_op *sym = op->sym;
1282         struct dpaa_sec_job *cf;
1283         struct dpaa_sec_op_ctx *ctx;
1284         struct qm_sg_entry *sg;
1285         uint32_t length = 0;
1286         rte_iova_t src_start_addr, dst_start_addr;
1287         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1288                         ses->iv.offset);
1289
1290         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1291
1292         if (sym->m_dst)
1293                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1294         else
1295                 dst_start_addr = src_start_addr;
1296
1297         ctx = dpaa_sec_alloc_ctx(ses, 7);
1298         if (!ctx)
1299                 return NULL;
1300
1301         cf = &ctx->job;
1302         ctx->op = op;
1303
1304         /* input */
1305         rte_prefetch0(cf->sg);
1306         sg = &cf->sg[2];
1307         qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1308         if (is_encode(ses)) {
1309                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1310                 sg->length = ses->iv.length;
1311                 length += sg->length;
1312                 cpu_to_hw_sg(sg);
1313
1314                 sg++;
1315                 if (ses->auth_only_len) {
1316                         qm_sg_entry_set64(sg,
1317                                           rte_dpaa_mem_vtop(sym->aead.aad.data));
1318                         sg->length = ses->auth_only_len;
1319                         length += sg->length;
1320                         cpu_to_hw_sg(sg);
1321                         sg++;
1322                 }
1323                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1324                 sg->length = sym->aead.data.length;
1325                 length += sg->length;
1326                 sg->final = 1;
1327                 cpu_to_hw_sg(sg);
1328         } else {
1329                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1330                 sg->length = ses->iv.length;
1331                 length += sg->length;
1332                 cpu_to_hw_sg(sg);
1333
1334                 sg++;
1335                 if (ses->auth_only_len) {
1336                         qm_sg_entry_set64(sg,
1337                                           rte_dpaa_mem_vtop(sym->aead.aad.data));
1338                         sg->length = ses->auth_only_len;
1339                         length += sg->length;
1340                         cpu_to_hw_sg(sg);
1341                         sg++;
1342                 }
1343                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1344                 sg->length = sym->aead.data.length;
1345                 length += sg->length;
1346                 cpu_to_hw_sg(sg);
1347
1348                 memcpy(ctx->digest, sym->aead.digest.data,
1349                        ses->digest_length);
1350                 sg++;
1351
1352                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1353                 sg->length = ses->digest_length;
1354                 length += sg->length;
1355                 sg->final = 1;
1356                 cpu_to_hw_sg(sg);
1357         }
1358         /* input compound frame */
1359         cf->sg[1].length = length;
1360         cf->sg[1].extension = 1;
1361         cf->sg[1].final = 1;
1362         cpu_to_hw_sg(&cf->sg[1]);
1363
1364         /* output */
1365         sg++;
1366         qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1367         qm_sg_entry_set64(sg,
1368                 dst_start_addr + sym->aead.data.offset);
1369         sg->length = sym->aead.data.length;
1370         length = sg->length;
1371         if (is_encode(ses)) {
1372                 cpu_to_hw_sg(sg);
1373                 /* set auth output */
1374                 sg++;
1375                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1376                 sg->length = ses->digest_length;
1377                 length += sg->length;
1378         }
1379         sg->final = 1;
1380         cpu_to_hw_sg(sg);
1381
1382         /* output compound frame */
1383         cf->sg[0].length = length;
1384         cf->sg[0].extension = 1;
1385         cpu_to_hw_sg(&cf->sg[0]);
1386
1387         return cf;
1388 }
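/*
 * GCM input frame layout: IV, then optional AAD (auth_only_len), then
 * the aead data range; on decode the expected tag from ctx->digest is
 * appended so SEC verifies it. The output frame covers the aead data
 * range plus, on encode, the digest entry at sym->aead.digest.phys_addr.
 */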
1389
1390 static inline struct dpaa_sec_job *
1391 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1392 {
1393         struct rte_crypto_sym_op *sym = op->sym;
1394         struct dpaa_sec_job *cf;
1395         struct dpaa_sec_op_ctx *ctx;
1396         struct qm_sg_entry *sg, *out_sg, *in_sg;
1397         struct rte_mbuf *mbuf;
1398         uint8_t req_segs;
1399         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1400                         ses->iv.offset);
1401
1402         if (sym->m_dst) {
1403                 mbuf = sym->m_dst;
1404                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1405         } else {
1406                 mbuf = sym->m_src;
1407                 req_segs = mbuf->nb_segs * 2 + 4;
1408         }
1409
1410         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1411                 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1412                                 MAX_SG_ENTRIES);
1413                 return NULL;
1414         }
1415
1416         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1417         if (!ctx)
1418                 return NULL;
1419
1420         cf = &ctx->job;
1421         ctx->op = op;
1422
1423         rte_prefetch0(cf->sg);
1424
1425         /* output */
1426         out_sg = &cf->sg[0];
1427         out_sg->extension = 1;
1428         if (is_encode(ses))
1429                 out_sg->length = sym->auth.data.length + ses->digest_length;
1430         else
1431                 out_sg->length = sym->auth.data.length;
1432
1433         /* output sg entries */
1434         sg = &cf->sg[2];
1435         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1436         cpu_to_hw_sg(out_sg);
1437
1438         /* 1st seg */
1439         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1440         sg->length = mbuf->data_len - sym->auth.data.offset;
1441         sg->offset = sym->auth.data.offset;
1442
1443         /* Successive segs */
1444         mbuf = mbuf->next;
1445         while (mbuf) {
1446                 cpu_to_hw_sg(sg);
1447                 sg++;
1448                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1449                 sg->length = mbuf->data_len;
1450                 mbuf = mbuf->next;
1451         }
1452         sg->length -= ses->digest_length;
1453
1454         if (is_encode(ses)) {
1455                 cpu_to_hw_sg(sg);
1456                 /* set auth output */
1457                 sg++;
1458                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1459                 sg->length = ses->digest_length;
1460         }
1461         sg->final = 1;
1462         cpu_to_hw_sg(sg);
1463
1464         /* input */
1465         mbuf = sym->m_src;
1466         in_sg = &cf->sg[1];
1467         in_sg->extension = 1;
1468         in_sg->final = 1;
1469         if (is_encode(ses))
1470                 in_sg->length = ses->iv.length + sym->auth.data.length;
1471         else
1472                 in_sg->length = ses->iv.length + sym->auth.data.length
1473                                                 + ses->digest_length;
1474
1475         /* input sg entries */
1476         sg++;
1477         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1478         cpu_to_hw_sg(in_sg);
1479
1480         /* 1st seg IV */
1481         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1482         sg->length = ses->iv.length;
1483         cpu_to_hw_sg(sg);
1484
1485         /* 2nd seg */
1486         sg++;
1487         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1488         sg->length = mbuf->data_len - sym->auth.data.offset;
1489         sg->offset = sym->auth.data.offset;
1490
1491         /* Successive segs */
1492         mbuf = mbuf->next;
1493         while (mbuf) {
1494                 cpu_to_hw_sg(sg);
1495                 sg++;
1496                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1497                 sg->length = mbuf->data_len;
1498                 mbuf = mbuf->next;
1499         }
1500
1501         sg->length -= ses->digest_length;
1502         if (is_decode(ses)) {
1503                 cpu_to_hw_sg(sg);
1504                 sg++;
1505                 memcpy(ctx->digest, sym->auth.digest.data,
1506                         ses->digest_length);
1507                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1508                 sg->length = ses->digest_length;
1509         }
1510         sg->final = 1;
1511         cpu_to_hw_sg(sg);
1512
1513         return cf;
1514 }
1515
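/*
 * Contiguous-buffer variant of the cipher+auth builder: source (and
 * optional destination) are single-segment mbufs, so a fixed table of
 * at most 7 SG entries (IV, payload and digest on either side) is
 * allocated up front.
 */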
1516 static inline struct dpaa_sec_job *
1517 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1518 {
1519         struct rte_crypto_sym_op *sym = op->sym;
1520         struct dpaa_sec_job *cf;
1521         struct dpaa_sec_op_ctx *ctx;
1522         struct qm_sg_entry *sg;
1523         rte_iova_t src_start_addr, dst_start_addr;
1524         uint32_t length = 0;
1525         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1526                         ses->iv.offset);
1527
1528         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1529         if (sym->m_dst)
1530                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1531         else
1532                 dst_start_addr = src_start_addr;
1533
1534         ctx = dpaa_sec_alloc_ctx(ses, 7);
1535         if (!ctx)
1536                 return NULL;
1537
1538         cf = &ctx->job;
1539         ctx->op = op;
1540
1541         /* input */
1542         rte_prefetch0(cf->sg);
1543         sg = &cf->sg[2];
1544         qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1545         if (is_encode(ses)) {
1546                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1547                 sg->length = ses->iv.length;
1548                 length += sg->length;
1549                 cpu_to_hw_sg(sg);
1550
1551                 sg++;
1552                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1553                 sg->length = sym->auth.data.length;
1554                 length += sg->length;
1555                 sg->final = 1;
1556                 cpu_to_hw_sg(sg);
1557         } else {
1558                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1559                 sg->length = ses->iv.length;
1560                 length += sg->length;
1561                 cpu_to_hw_sg(sg);
1562
1563                 sg++;
1564
1565                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1566                 sg->length = sym->auth.data.length;
1567                 length += sg->length;
1568                 cpu_to_hw_sg(sg);
1569
1570                 memcpy(ctx->digest, sym->auth.digest.data,
1571                        ses->digest_length);
1572                 sg++;
1573
1574                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1575                 sg->length = ses->digest_length;
1576                 length += sg->length;
1577                 sg->final = 1;
1578                 cpu_to_hw_sg(sg);
1579         }
1580         /* input compound frame */
1581         cf->sg[1].length = length;
1582         cf->sg[1].extension = 1;
1583         cf->sg[1].final = 1;
1584         cpu_to_hw_sg(&cf->sg[1]);
1585
1586         /* output */
1587         sg++;
1588         qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1589         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1590         sg->length = sym->cipher.data.length;
1591         length = sg->length;
1592         if (is_encode(ses)) {
1593                 cpu_to_hw_sg(sg);
1594                 /* set auth output */
1595                 sg++;
1596                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1597                 sg->length = ses->digest_length;
1598                 length += sg->length;
1599         }
1600         sg->final = 1;
1601         cpu_to_hw_sg(sg);
1602
1603         /* output compound frame */
1604         cf->sg[0].length = length;
1605         cf->sg[0].extension = 1;
1606         cpu_to_hw_sg(&cf->sg[0]);
1607
1608         return cf;
1609 }
1610
1611 #ifdef RTE_LIB_SECURITY
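/*
 * Protocol offload (IPsec/PDCP) job for a contiguous buffer: the whole
 * packet is handed to SEC, which applies the full protocol transform,
 * so a single input and a single output SG entry suffice.
 */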
1612 static inline struct dpaa_sec_job *
1613 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1614 {
1615         struct rte_crypto_sym_op *sym = op->sym;
1616         struct dpaa_sec_job *cf;
1617         struct dpaa_sec_op_ctx *ctx;
1618         struct qm_sg_entry *sg;
1619         phys_addr_t src_start_addr, dst_start_addr;
1620
1621         ctx = dpaa_sec_alloc_ctx(ses, 2);
1622         if (!ctx)
1623                 return NULL;
1624         cf = &ctx->job;
1625         ctx->op = op;
1626
1627         src_start_addr = rte_pktmbuf_iova(sym->m_src);
1628
1629         if (sym->m_dst)
1630                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1631         else
1632                 dst_start_addr = src_start_addr;
1633
1634         /* input */
1635         sg = &cf->sg[1];
1636         qm_sg_entry_set64(sg, src_start_addr);
1637         sg->length = sym->m_src->pkt_len;
1638         sg->final = 1;
1639         cpu_to_hw_sg(sg);
1640
1641         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1642         /* output */
1643         sg = &cf->sg[0];
1644         qm_sg_entry_set64(sg, dst_start_addr);
1645         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1646         cpu_to_hw_sg(sg);
1647
1648         return cf;
1649 }
1650
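/*
 * Scatter-gather variant of build_proto(): walks every segment of the
 * source (and optional destination) chain, and gives the last output
 * segment all remaining tailroom since the protocol transform can grow
 * the packet (tunnel headers, ICV).
 */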
1651 static inline struct dpaa_sec_job *
1652 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1653 {
1654         struct rte_crypto_sym_op *sym = op->sym;
1655         struct dpaa_sec_job *cf;
1656         struct dpaa_sec_op_ctx *ctx;
1657         struct qm_sg_entry *sg, *out_sg, *in_sg;
1658         struct rte_mbuf *mbuf;
1659         uint8_t req_segs;
1660         uint32_t in_len = 0, out_len = 0;
1661
1662         if (sym->m_dst)
1663                 mbuf = sym->m_dst;
1664         else
1665                 mbuf = sym->m_src;
1666
1667         req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1668         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1669                 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1670                                 MAX_SG_ENTRIES);
1671                 return NULL;
1672         }
1673
1674         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1675         if (!ctx)
1676                 return NULL;
1677         cf = &ctx->job;
1678         ctx->op = op;
1679         /* output */
1680         out_sg = &cf->sg[0];
1681         out_sg->extension = 1;
1682         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1683
1684         /* 1st seg */
1685         sg = &cf->sg[2];
1686         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1687         sg->offset = 0;
1688
1689         /* Successive segs */
1690         while (mbuf->next) {
1691                 sg->length = mbuf->data_len;
1692                 out_len += sg->length;
1693                 mbuf = mbuf->next;
1694                 cpu_to_hw_sg(sg);
1695                 sg++;
1696                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1697                 sg->offset = 0;
1698         }
1699         sg->length = mbuf->buf_len - mbuf->data_off;
1700         out_len += sg->length;
1701         sg->final = 1;
1702         cpu_to_hw_sg(sg);
1703
1704         out_sg->length = out_len;
1705         cpu_to_hw_sg(out_sg);
1706
1707         /* input */
1708         mbuf = sym->m_src;
1709         in_sg = &cf->sg[1];
1710         in_sg->extension = 1;
1711         in_sg->final = 1;
1712         in_len = mbuf->data_len;
1713
1714         sg++;
1715         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1716
1717         /* 1st seg */
1718         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1719         sg->length = mbuf->data_len;
1720         sg->offset = 0;
1721
1722         /* Successive segs */
1723         mbuf = mbuf->next;
1724         while (mbuf) {
1725                 cpu_to_hw_sg(sg);
1726                 sg++;
1727                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1728                 sg->length = mbuf->data_len;
1729                 sg->offset = 0;
1730                 in_len += sg->length;
1731                 mbuf = mbuf->next;
1732         }
1733         sg->final = 1;
1734         cpu_to_hw_sg(sg);
1735
1736         in_sg->length = in_len;
1737         cpu_to_hw_sg(in_sg);
1738
1739         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1740
1741         return cf;
1742 }
1743 #endif
1744
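/*
 * Enqueue path: resolve the session for each op, build the compound
 * frame (contiguous or scatter-gather variant) and enqueue bursts of up
 * to DPAA_SEC_BURST frame descriptors on the session's SEC input queue.
 * Illustrative caller-side usage via the standard cryptodev API:
 *
 *   uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, n);
 *   // ops[sent..n-1] were not accepted and may be retried
 */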
1745 static uint16_t
1746 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1747                        uint16_t nb_ops)
1748 {
1749         /* Transmit the frames to the given device and queue pair */
1750         uint32_t loop;
1751         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1752         uint16_t num_tx = 0, nb_ops_in = nb_ops; /* nb_ops is consumed below */
1753         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1754         uint32_t frames_to_send;
1755         struct rte_crypto_op *op;
1756         struct dpaa_sec_job *cf;
1757         dpaa_sec_session *ses;
1758         uint16_t auth_hdr_len, auth_tail_len;
1759         uint32_t index, flags[DPAA_SEC_BURST] = {0};
1760         struct qman_fq *inq[DPAA_SEC_BURST];
1761
1762         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1763                 if (rte_dpaa_portal_init((void *)0)) {
1764                         DPAA_SEC_ERR("Failure in affining portal");
1765                         return 0;
1766                 }
1767         }
1768
1769         while (nb_ops) {
1770                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1771                                 DPAA_SEC_BURST : nb_ops;
1772                 for (loop = 0; loop < frames_to_send; loop++) {
1773                         op = *(ops++);
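                        /* An mbuf received on an order-restoring (atomic)
                         * queue still holds its DQRR entry; request a
                         * discrete consumption acknowledgement (DCA) so this
                         * enqueue also releases that DQRR slot.
                         */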
1774                         if (*dpaa_seqn(op->sym->m_src) != 0) {
1775                                 index = *dpaa_seqn(op->sym->m_src) - 1;
1776                                 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1777                                         /* QM_EQCR_DCA_IDXMASK = 0x0f */
1778                                         flags[loop] = ((index & 0x0f) << 8);
1779                                         flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1780                                         DPAA_PER_LCORE_DQRR_SIZE--;
1781                                         DPAA_PER_LCORE_DQRR_HELD &=
1782                                                                 ~(1 << index);
1783                                 }
1784                         }
1785
1786                         switch (op->sess_type) {
1787                         case RTE_CRYPTO_OP_WITH_SESSION:
1788                                 ses = (dpaa_sec_session *)
1789                                         get_sym_session_private_data(
1790                                                         op->sym->session,
1791                                                         cryptodev_driver_id);
1792                                 break;
1793 #ifdef RTE_LIB_SECURITY
1794                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1795                                 ses = (dpaa_sec_session *)
1796                                         get_sec_session_private_data(
1797                                                         op->sym->sec_session);
1798                                 break;
1799 #endif
1800                         default:
1801                                 DPAA_SEC_DP_ERR(
1802                                         "sessionless crypto op not supported");
1803                                 frames_to_send = loop;
1804                                 nb_ops = loop;
1805                                 goto send_pkts;
1806                         }
1807
1808                         if (!ses) {
1809                                 DPAA_SEC_DP_ERR("session not available");
1810                                 frames_to_send = loop;
1811                                 nb_ops = loop;
1812                                 goto send_pkts;
1813                         }
1814
1815                         if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1816                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1817                                         frames_to_send = loop;
1818                                         nb_ops = loop;
1819                                         goto send_pkts;
1820                                 }
1821                         } else if (unlikely(ses->qp[rte_lcore_id() %
1822                                                 MAX_DPAA_CORES] != qp)) {
1823                                 DPAA_SEC_DP_ERR("Old sess->qp = %p,"
1824                                         " new qp = %p\n",
1825                                         ses->qp[rte_lcore_id() %
1826                                         MAX_DPAA_CORES], qp);
1827                                 frames_to_send = loop;
1828                                 nb_ops = loop;
1829                                 goto send_pkts;
1830                         }
1831
1832                         auth_hdr_len = op->sym->auth.data.length -
1833                                                 op->sym->cipher.data.length;
1834                         auth_tail_len = 0;
1835
1836                         if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1837                                   ((op->sym->m_dst == NULL) ||
1838                                    rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1839                                 switch (ses->ctxt) {
1840 #ifdef RTE_LIB_SECURITY
1841                                 case DPAA_SEC_PDCP:
1842                                 case DPAA_SEC_IPSEC:
1843                                         cf = build_proto(op, ses);
1844                                         break;
1845 #endif
1846                                 case DPAA_SEC_AUTH:
1847                                         cf = build_auth_only(op, ses);
1848                                         break;
1849                                 case DPAA_SEC_CIPHER:
1850                                         cf = build_cipher_only(op, ses);
1851                                         break;
1852                                 case DPAA_SEC_AEAD:
1853                                         cf = build_cipher_auth_gcm(op, ses);
1854                                         auth_hdr_len = ses->auth_only_len;
1855                                         break;
1856                                 case DPAA_SEC_CIPHER_HASH:
1857                                         auth_hdr_len =
1858                                                 op->sym->cipher.data.offset
1859                                                 - op->sym->auth.data.offset;
1860                                         auth_tail_len =
1861                                                 op->sym->auth.data.length
1862                                                 - op->sym->cipher.data.length
1863                                                 - auth_hdr_len;
1864                                         cf = build_cipher_auth(op, ses);
1865                                         break;
1866                                 default:
1867                                         DPAA_SEC_DP_ERR("unsupported op");
1868                                         frames_to_send = loop;
1869                                         nb_ops = loop;
1870                                         goto send_pkts;
1871                                 }
1872                         } else {
1873                                 switch (ses->ctxt) {
1874 #ifdef RTE_LIB_SECURITY
1875                                 case DPAA_SEC_PDCP:
1876                                 case DPAA_SEC_IPSEC:
1877                                         cf = build_proto_sg(op, ses);
1878                                         break;
1879 #endif
1880                                 case DPAA_SEC_AUTH:
1881                                         cf = build_auth_only_sg(op, ses);
1882                                         break;
1883                                 case DPAA_SEC_CIPHER:
1884                                         cf = build_cipher_only_sg(op, ses);
1885                                         break;
1886                                 case DPAA_SEC_AEAD:
1887                                         cf = build_cipher_auth_gcm_sg(op, ses);
1888                                         auth_hdr_len = ses->auth_only_len;
1889                                         break;
1890                                 case DPAA_SEC_CIPHER_HASH:
1891                                         auth_hdr_len =
1892                                                 op->sym->cipher.data.offset
1893                                                 - op->sym->auth.data.offset;
1894                                         auth_tail_len =
1895                                                 op->sym->auth.data.length
1896                                                 - op->sym->cipher.data.length
1897                                                 - auth_hdr_len;
1898                                         cf = build_cipher_auth_sg(op, ses);
1899                                         break;
1900                                 default:
1901                                         DPAA_SEC_DP_ERR("unsupported op");
1902                                         frames_to_send = loop;
1903                                         nb_ops = loop;
1904                                         goto send_pkts;
1905                                 }
1906                         }
1907                         if (unlikely(!cf)) {
1908                                 frames_to_send = loop;
1909                                 nb_ops = loop;
1910                                 goto send_pkts;
1911                         }
1912
1913                         fd = &fds[loop];
1914                         inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1915                         fd->opaque_addr = 0;
1916                         fd->cmd = 0;
1917                         qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
1918                         fd->_format1 = qm_fd_compound;
1919                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
1920
1921                         /* Auth_only_len is set as 0 in descriptor and it is
1922                          * overwritten here in the fd.cmd which will update
1923                          * the DPOVRD reg.
1924                          */
1925                         if (auth_hdr_len || auth_tail_len) {
1926                                 fd->cmd = 0x80000000;
1927                                 fd->cmd |=
1928                                         ((auth_tail_len << 16) | auth_hdr_len);
1929                         }
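                        /* DPOVRD word as encoded above: bit 31 = override
                         * valid, bits 30:16 = auth-only trailer length,
                         * bits 15:0 = auth-only header length (bytes).
                         */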
1930
1931 #ifdef RTE_LIB_SECURITY
1932                         /* For PDCP with per-packet HFN override, the HFN
1933                          * stored in mbuf private data after the sym_op is
1934                          * written into DPOVRD via fd->cmd (bit 31 set). */
1935                         if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1936                                 fd->cmd = 0x80000000 |
1937                                         *((uint32_t *)((uint8_t *)op +
1938                                         ses->pdcp.hfn_ovd_offset));
1939                                 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1940                                         *((uint32_t *)((uint8_t *)op +
1941                                         ses->pdcp.hfn_ovd_offset)),
1942                                         ses->pdcp.hfn_ovd);
1943                         }
1944 #endif
1945                 }
1946 send_pkts:
1947                 loop = 0;
1948                 while (loop < frames_to_send) {
1949                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1950                                         &flags[loop], frames_to_send - loop);
1951                 }
1952                 nb_ops -= frames_to_send;
1953                 num_tx += frames_to_send;
1954         }
1955
1956         dpaa_qp->tx_pkts += num_tx;
1957         dpaa_qp->tx_errs += nb_ops_in - num_tx; /* requested but never enqueued */
1958
1959         return num_tx;
1960 }
1961
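/*
 * Dequeue path: drain up to nb_ops completed frames from the queue
 * pair's output queue via dpaa_sec_deq() and update Rx statistics.
 */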
1962 static uint16_t
1963 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1964                        uint16_t nb_ops)
1965 {
1966         uint16_t num_rx;
1967         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1968
1969         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1970                 if (rte_dpaa_portal_init((void *)0)) {
1971                         DPAA_SEC_ERR("Failure in affining portal");
1972                         return 0;
1973                 }
1974         }
1975
1976         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1977
1978         dpaa_qp->rx_pkts += num_rx;
1979         dpaa_qp->rx_errs += nb_ops - num_rx;
1980
1981         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1982
1983         return num_rx;
1984 }
1985
1986 /** Release queue pair */
1987 static int
1988 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1989                             uint16_t qp_id)
1990 {
1991         struct dpaa_sec_dev_private *internals;
1992         struct dpaa_sec_qp *qp = NULL;
1993
1994         PMD_INIT_FUNC_TRACE();
1995
1996         DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1997
1998         internals = dev->data->dev_private;
1999         if (qp_id >= internals->max_nb_queue_pairs) {
2000                 DPAA_SEC_ERR("Max supported qpid %d",
2001                              internals->max_nb_queue_pairs);
2002                 return -EINVAL;
2003         }
2004
2005         qp = &internals->qps[qp_id];
2006         rte_mempool_free(qp->ctx_pool);
             qp->ctx_pool = NULL; /* allow re-creation on a later setup */
2007         qp->internals = NULL;
2008         dev->data->queue_pairs[qp_id] = NULL;
2009
2010         return 0;
2011 }
2012
2013 /** Setup a queue pair */
2014 static int
2015 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2016                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
2017                 __rte_unused int socket_id)
2018 {
2019         struct dpaa_sec_dev_private *internals;
2020         struct dpaa_sec_qp *qp = NULL;
2021         char str[20];
2022
2023         DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
2024
2025         internals = dev->data->dev_private;
2026         if (qp_id >= internals->max_nb_queue_pairs) {
2027                 DPAA_SEC_ERR("Max supported qpid %d",
2028                              internals->max_nb_queue_pairs);
2029                 return -EINVAL;
2030         }
2031
2032         qp = &internals->qps[qp_id];
2033         qp->internals = internals;
2034         snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2035                         dev->data->dev_id, qp_id);
2036         if (!qp->ctx_pool) {
2037                 qp->ctx_pool = rte_mempool_create((const char *)str,
2038                                                         CTX_POOL_NUM_BUFS,
2039                                                         CTX_POOL_BUF_SIZE,
2040                                                         CTX_POOL_CACHE_SIZE, 0,
2041                                                         NULL, NULL, NULL, NULL,
2042                                                         SOCKET_ID_ANY, 0);
2043                 if (!qp->ctx_pool) {
2044                         DPAA_SEC_ERR("%s create failed\n", str);
2045                         return -ENOMEM;
2046                 }
2047         } else
2048                 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2049                                 dev->data->dev_id, qp_id);
2050         dev->data->queue_pairs[qp_id] = qp;
2051
2052         return 0;
2053 }
2054
2055 /** Returns the size of session structure */
2056 static unsigned int
2057 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2058 {
2059         PMD_INIT_FUNC_TRACE();
2060
2061         return sizeof(dpaa_sec_session);
2062 }
2063
2064 static int
2065 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2066                      struct rte_crypto_sym_xform *xform,
2067                      dpaa_sec_session *session)
2068 {
2069         session->ctxt = DPAA_SEC_CIPHER;
2070         session->cipher_alg = xform->cipher.algo;
2071         session->iv.length = xform->cipher.iv.length;
2072         session->iv.offset = xform->cipher.iv.offset;
2073         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2074                                                RTE_CACHE_LINE_SIZE);
2075         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2076                 DPAA_SEC_ERR("No Memory for cipher key");
2077                 return -ENOMEM;
2078         }
2079         session->cipher_key.length = xform->cipher.key.length;
2080
2081         memcpy(session->cipher_key.data, xform->cipher.key.data,
2082                xform->cipher.key.length);
2083         switch (xform->cipher.algo) {
2084         case RTE_CRYPTO_CIPHER_AES_CBC:
2085                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2086                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2087                 break;
2088         case RTE_CRYPTO_CIPHER_DES_CBC:
2089                 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2090                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2091                 break;
2092         case RTE_CRYPTO_CIPHER_3DES_CBC:
2093                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2094                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2095                 break;
2096         case RTE_CRYPTO_CIPHER_AES_CTR:
2097                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2098                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2099                 break;
2100         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2101                 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2102                 break;
2103         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2104                 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2105                 break;
2106         default:
2107                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2108                               xform->cipher.algo);
2109                 return -ENOTSUP;
2110         }
2111         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2112                         DIR_ENC : DIR_DEC;
2113
2114         return 0;
2115 }
2116
2117 static int
2118 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2119                    struct rte_crypto_sym_xform *xform,
2120                    dpaa_sec_session *session)
2121 {
2122         session->ctxt = DPAA_SEC_AUTH;
2123         session->auth_alg = xform->auth.algo;
2124         session->auth_key.length = xform->auth.key.length;
2125         if (xform->auth.key.length) {
2126                 session->auth_key.data =
2127                                 rte_zmalloc(NULL, xform->auth.key.length,
2128                                              RTE_CACHE_LINE_SIZE);
2129                 if (session->auth_key.data == NULL) {
2130                         DPAA_SEC_ERR("No Memory for auth key");
2131                         return -ENOMEM;
2132                 }
2133                 memcpy(session->auth_key.data, xform->auth.key.data,
2134                                 xform->auth.key.length);
2135
2136         }
2137         session->digest_length = xform->auth.digest_length;
2138         if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2139                 session->iv.offset = xform->auth.iv.offset;
2140                 session->iv.length = xform->auth.iv.length;
2141         }
2142
2143         switch (xform->auth.algo) {
2144         case RTE_CRYPTO_AUTH_SHA1:
2145                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2146                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2147                 break;
2148         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2149                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2150                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2151                 break;
2152         case RTE_CRYPTO_AUTH_MD5:
2153                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2154                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2155                 break;
2156         case RTE_CRYPTO_AUTH_MD5_HMAC:
2157                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2158                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2159                 break;
2160         case RTE_CRYPTO_AUTH_SHA224:
2161                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2162                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2163                 break;
2164         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2165                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2166                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2167                 break;
2168         case RTE_CRYPTO_AUTH_SHA256:
2169                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2170                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2171                 break;
2172         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2173                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2174                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2175                 break;
2176         case RTE_CRYPTO_AUTH_SHA384:
2177                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2178                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2179                 break;
2180         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2181                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2182                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2183                 break;
2184         case RTE_CRYPTO_AUTH_SHA512:
2185                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2186                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2187                 break;
2188         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2189                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2190                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2191                 break;
2192         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2193                 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2194                 session->auth_key.algmode = OP_ALG_AAI_F9;
2195                 break;
2196         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2197                 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2198                 session->auth_key.algmode = OP_ALG_AAI_F9;
2199                 break;
2200         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2201                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2202                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2203                 break;
2204         case RTE_CRYPTO_AUTH_AES_CMAC:
2205                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2206                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2207                 break;
2208         default:
2209                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2210                               xform->auth.algo);
2211                 return -ENOTSUP;
2212         }
2213
2214         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2215                         DIR_ENC : DIR_DEC;
2216
2217         return 0;
2218 }
2219
2220 static int
2221 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2222                    struct rte_crypto_sym_xform *xform,
2223                    dpaa_sec_session *session)
2224 {
2225
2226         struct rte_crypto_cipher_xform *cipher_xform;
2227         struct rte_crypto_auth_xform *auth_xform;
2228
2229         session->ctxt = DPAA_SEC_CIPHER_HASH;
2230         if (session->auth_cipher_text) {
2231                 cipher_xform = &xform->cipher;
2232                 auth_xform = &xform->next->auth;
2233         } else {
2234                 cipher_xform = &xform->next->cipher;
2235                 auth_xform = &xform->auth;
2236         }
2237
2238         /* Set IV parameters */
2239         session->iv.offset = cipher_xform->iv.offset;
2240         session->iv.length = cipher_xform->iv.length;
2241
2242         session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2243                                                RTE_CACHE_LINE_SIZE);
2244         if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2245                 DPAA_SEC_ERR("No Memory for cipher key");
2246                 return -ENOMEM;
2247         }
2248         session->cipher_key.length = cipher_xform->key.length;
2249         session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2250                                              RTE_CACHE_LINE_SIZE);
2251         if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2252                 DPAA_SEC_ERR("No Memory for auth key");
2253                 return -ENOMEM;
2254         }
2255         session->auth_key.length = auth_xform->key.length;
2256         memcpy(session->cipher_key.data, cipher_xform->key.data,
2257                cipher_xform->key.length);
2258         memcpy(session->auth_key.data, auth_xform->key.data,
2259                auth_xform->key.length);
2260
2261         session->digest_length = auth_xform->digest_length;
2262         session->auth_alg = auth_xform->algo;
2263
2264         switch (auth_xform->algo) {
2265         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2266                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2267                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2268                 break;
2269         case RTE_CRYPTO_AUTH_MD5_HMAC:
2270                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2271                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2272                 break;
2273         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2274                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2275                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2276                 break;
2277         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2278                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2279                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2280                 break;
2281         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2282                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2283                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2284                 break;
2285         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2286                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2287                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2288                 break;
2289         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2290                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2291                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2292                 break;
2293         case RTE_CRYPTO_AUTH_AES_CMAC:
2294                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2295                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2296                 break;
2297         default:
2298                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2299                               auth_xform->algo);
2300                 return -ENOTSUP;
2301         }
2302
2303         session->cipher_alg = cipher_xform->algo;
2304
2305         switch (cipher_xform->algo) {
2306         case RTE_CRYPTO_CIPHER_AES_CBC:
2307                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2308                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2309                 break;
2310         case RTE_CRYPTO_CIPHER_DES_CBC:
2311                 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2312                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2313                 break;
2314         case RTE_CRYPTO_CIPHER_3DES_CBC:
2315                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2316                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2317                 break;
2318         case RTE_CRYPTO_CIPHER_AES_CTR:
2319                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2320                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2321                 break;
2322         default:
2323                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2324                               cipher_xform->algo);
2325                 return -ENOTSUP;
2326         }
2327         session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2328                                 DIR_ENC : DIR_DEC;
2329         return 0;
2330 }
2331
2332 static int
2333 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2334                    struct rte_crypto_sym_xform *xform,
2335                    dpaa_sec_session *session)
2336 {
2337         session->aead_alg = xform->aead.algo;
2338         session->ctxt = DPAA_SEC_AEAD;
2339         session->iv.length = xform->aead.iv.length;
2340         session->iv.offset = xform->aead.iv.offset;
2341         session->auth_only_len = xform->aead.aad_length;
2342         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2343                                              RTE_CACHE_LINE_SIZE);
2344         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2345                 DPAA_SEC_ERR("No Memory for aead key\n");
2346                 return -ENOMEM;
2347         }
2348         session->aead_key.length = xform->aead.key.length;
2349         session->digest_length = xform->aead.digest_length;
2350
2351         memcpy(session->aead_key.data, xform->aead.key.data,
2352                xform->aead.key.length);
2353
2354         switch (session->aead_alg) {
2355         case RTE_CRYPTO_AEAD_AES_GCM:
2356                 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2357                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2358                 break;
2359         default:
2360                 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2361                 return -ENOTSUP;
2362         }
2363
2364         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2365                         DIR_ENC : DIR_DEC;
2366
2367         return 0;
2368 }
2369
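/*
 * Reserve a free SEC Rx frame queue for a session. Callers serialize on
 * internals->lock (see dpaa_sec_set_session_parameters()), so the plain
 * linear scan over inq_attach[] is safe.
 */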
2370 static struct qman_fq *
2371 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2372 {
2373         unsigned int i;
2374
2375         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2376                 if (qi->inq_attach[i] == 0) {
2377                         qi->inq_attach[i] = 1;
2378                         return &qi->inq[i];
2379                 }
2380         }
2381         DPAA_SEC_WARN("All sessions in use (max %u)", qi->max_nb_sessions);
2382
2383         return NULL;
2384 }
2385
2386 static int
2387 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2388 {
2389         unsigned int i;
2390
2391         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2392                 if (&qi->inq[i] == fq) {
2393                         if (qman_retire_fq(fq, NULL) != 0)
2394                                 DPAA_SEC_DEBUG("Queue is not retired\n");
2395                         qman_oos_fq(fq);
2396                         qi->inq_attach[i] = 0;
2397                         return 0;
2398                 }
2399         }
2400         return -1;
2401 }
2402
2403 static int
2404 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2405 {
2406         int ret;
2407
2408         sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2409         ret = dpaa_sec_prep_cdb(sess);
2410         if (ret) {
2411                 DPAA_SEC_ERR("Unable to prepare sec cdb");
2412                 return ret;
2413         }
2414         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2415                 ret = rte_dpaa_portal_init((void *)0);
2416                 if (ret) {
2417                         DPAA_SEC_ERR("Failure in affining portal");
2418                         return ret;
2419                 }
2420         }
2421         ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2422                                rte_dpaa_mem_vtop(&sess->cdb),
2423                                qman_fq_fqid(&qp->outq));
2424         if (ret)
2425                 DPAA_SEC_ERR("Unable to init sec queue");
2426
2427         return ret;
2428 }
2429
2430 static inline void
2431 free_session_data(dpaa_sec_session *s)
2432 {
2433         if (is_aead(s))
2434                 rte_free(s->aead_key.data);
2435         else {
2436                 rte_free(s->auth_key.data);
2437                 rte_free(s->cipher_key.data);
2438         }
2439         memset(s, 0, sizeof(dpaa_sec_session));
2440 }
2441
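/*
 * Parse the xform chain and initialise the session. Supported chains:
 * cipher only, auth only, cipher-then-auth (encrypt direction),
 * auth-then-cipher (decrypt direction) and single AEAD xforms; a NULL
 * cipher or NULL auth inside a chain degrades to the remaining single
 * transform. Illustrative cipher-then-auth chain (keys/IV omitted):
 *
 *   struct rte_crypto_sym_xform auth = {
 *           .type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *           .auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *           .auth.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *   };
 *   struct rte_crypto_sym_xform cipher = {
 *           .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *           .cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *           .cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *           .next = &auth,
 *   };
 */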
2442 static int
2443 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2444                             struct rte_crypto_sym_xform *xform, void *sess)
2445 {
2446         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2447         dpaa_sec_session *session = sess;
2448         uint32_t i;
2449         int ret;
2450
2451         PMD_INIT_FUNC_TRACE();
2452
2453         if (unlikely(sess == NULL)) {
2454                 DPAA_SEC_ERR("invalid session struct");
2455                 return -EINVAL;
2456         }
2457         memset(session, 0, sizeof(dpaa_sec_session));
2458
2459         /* Default IV length = 0 */
2460         session->iv.length = 0;
2461
2462         /* Cipher Only */
2463         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2464                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2465                 ret = dpaa_sec_cipher_init(dev, xform, session);
2466
2467         /* Authentication Only */
2468         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2469                    xform->next == NULL) {
2470                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2471                 session->ctxt = DPAA_SEC_AUTH;
2472                 ret = dpaa_sec_auth_init(dev, xform, session);
2473
2474         /* Cipher then Authenticate */
2475         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2476                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2477                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2478                         session->auth_cipher_text = 1;
2479                         if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2480                                 ret = dpaa_sec_auth_init(dev, xform, session);
2481                         else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2482                                 ret = dpaa_sec_cipher_init(dev, xform, session);
2483                         else
2484                                 ret = dpaa_sec_chain_init(dev, xform, session);
2485                 } else {
2486                         DPAA_SEC_ERR("Not supported: Cipher(decrypt) then Auth");
2487                         return -ENOTSUP;
2488                 }
2489         /* Authenticate then Cipher */
2490         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2491                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2492                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2493                         session->auth_cipher_text = 0;
2494                         if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2495                                 ret = dpaa_sec_cipher_init(dev, xform, session);
2496                         else if (xform->next->cipher.algo
2497                                         == RTE_CRYPTO_CIPHER_NULL)
2498                                 ret = dpaa_sec_auth_init(dev, xform, session);
2499                         else
2500                                 ret = dpaa_sec_chain_init(dev, xform, session);
2501                 } else {
2502                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
2503                         return -ENOTSUP;
2504                 }
2505
2506         /* AEAD operation for AES-GCM kind of Algorithms */
2507         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2508                    xform->next == NULL) {
2509                 ret = dpaa_sec_aead_init(dev, xform, session);
2510
2511         } else {
2512                 DPAA_SEC_ERR("Invalid crypto type");
2513                 return -EINVAL;
2514         }
2515         if (ret) {
2516                 DPAA_SEC_ERR("unable to init session");
2517                 goto err1;
2518         }
2519
2520         rte_spinlock_lock(&internals->lock);
2521         for (i = 0; i < MAX_DPAA_CORES; i++) {
2522                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2523                 if (session->inq[i] == NULL) {
2524                         DPAA_SEC_ERR("unable to attach sec queue");
2525                         rte_spinlock_unlock(&internals->lock);
2526                         ret = -EBUSY;
2527                         goto err1;
2528                 }
2529         }
2530         rte_spinlock_unlock(&internals->lock);
2531
2532         return 0;
2533
2534 err1:
2535         free_session_data(session);
2536         return ret;
2537 }
2538
2539 static int
2540 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2541                 struct rte_crypto_sym_xform *xform,
2542                 struct rte_cryptodev_sym_session *sess,
2543                 struct rte_mempool *mempool)
2544 {
2545         void *sess_private_data;
2546         int ret;
2547
2548         PMD_INIT_FUNC_TRACE();
2549
2550         if (rte_mempool_get(mempool, &sess_private_data)) {
2551                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2552                 return -ENOMEM;
2553         }
2554
2555         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2556         if (ret != 0) {
2557                 DPAA_SEC_ERR("failed to configure session parameters");
2558
2559                 /* Return session to mempool */
2560                 rte_mempool_put(mempool, sess_private_data);
2561                 return ret;
2562         }
2563
2564         set_sym_session_private_data(sess, dev->driver_id,
2565                         sess_private_data);
2566
2567
2568         return 0;
2569 }
2570
2571 static inline void
2572 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2573 {
2574         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2575         struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2576         uint8_t i;
2577
2578         for (i = 0; i < MAX_DPAA_CORES; i++) {
2579                 if (s->inq[i])
2580                         dpaa_sec_detach_rxq(qi, s->inq[i]);
2581                 s->inq[i] = NULL;
2582                 s->qp[i] = NULL;
2583         }
2584         free_session_data(s);
2585         rte_mempool_put(sess_mp, (void *)s);
2586 }
2587
2588 /** Clear the memory of session so it doesn't leave key material behind */
2589 static void
2590 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2591                 struct rte_cryptodev_sym_session *sess)
2592 {
2593         PMD_INIT_FUNC_TRACE();
2594         uint8_t index = dev->driver_id;
2595         void *sess_priv = get_sym_session_private_data(sess, index);
2596         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2597
2598         if (sess_priv) {
2599                 free_session_memory(dev, s);
2600                 set_sym_session_private_data(sess, index, NULL);
2601         }
2602 }
2603
2604 #ifdef RTE_LIB_SECURITY
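/*
 * IPsec AEAD (AES-GCM) setup: the protocol-descriptor algorithm is
 * selected by ICV length (8/12/16 bytes -> OP_PCL_IPSEC_AES_GCM{8,12,16})
 * and the 4-byte salt from the IPsec xform is stored in the encap or
 * decap PDB.
 */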
2605 static int
2606 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2607                         struct rte_security_ipsec_xform *ipsec_xform,
2608                         dpaa_sec_session *session)
2609 {
2610         PMD_INIT_FUNC_TRACE();
2611
2612         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2613                                                RTE_CACHE_LINE_SIZE);
2614         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2615                 DPAA_SEC_ERR("No Memory for aead key");
2616                 return -ENOMEM;
2617         }
2618         memcpy(session->aead_key.data, aead_xform->key.data,
2619                aead_xform->key.length);
2620
2621         session->digest_length = aead_xform->digest_length;
2622         session->aead_key.length = aead_xform->key.length;
2623
2624         switch (aead_xform->algo) {
2625         case RTE_CRYPTO_AEAD_AES_GCM:
2626                 switch (session->digest_length) {
2627                 case 8:
2628                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2629                         break;
2630                 case 12:
2631                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2632                         break;
2633                 case 16:
2634                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2635                         break;
2636                 default:
2637                         DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2638                                      session->digest_length);
2639                         return -EINVAL;
2640                 }
2641                 if (session->dir == DIR_ENC) {
2642                         memcpy(session->encap_pdb.gcm.salt,
2643                                 (uint8_t *)&(ipsec_xform->salt), 4);
2644                 } else {
2645                         memcpy(session->decap_pdb.gcm.salt,
2646                                 (uint8_t *)&(ipsec_xform->salt), 4);
2647                 }
2648                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2649                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2650                 break;
2651         default:
2652                 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2653                               aead_xform->algo);
2654                 return -ENOTSUP;
2655         }
2656         return 0;
2657 }
2658
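/*
 * Map the raw cipher/auth xforms of an IPsec session onto the SEC
 * protocol-descriptor selectors (OP_PCL_IPSEC_*); an absent xform on
 * either side degrades to the corresponding NULL algorithm.
 */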
2659 static int
2660 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2661         struct rte_crypto_auth_xform *auth_xform,
2662         struct rte_security_ipsec_xform *ipsec_xform,
2663         dpaa_sec_session *session)
2664 {
2665         if (cipher_xform) {
2666                 session->cipher_key.data = rte_zmalloc(NULL,
2667                                                        cipher_xform->key.length,
2668                                                        RTE_CACHE_LINE_SIZE);
2669                 if (session->cipher_key.data == NULL &&
2670                                 cipher_xform->key.length > 0) {
2671                         DPAA_SEC_ERR("No Memory for cipher key");
2672                         return -ENOMEM;
2673                 }
2674
2675                 session->cipher_key.length = cipher_xform->key.length;
2676                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2677                                 cipher_xform->key.length);
2678                 session->cipher_alg = cipher_xform->algo;
2679         } else {
2680                 session->cipher_key.data = NULL;
2681                 session->cipher_key.length = 0;
2682                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2683         }
2684
2685         if (auth_xform) {
2686                 session->auth_key.data = rte_zmalloc(NULL,
2687                                                 auth_xform->key.length,
2688                                                 RTE_CACHE_LINE_SIZE);
2689                 if (session->auth_key.data == NULL &&
2690                                 auth_xform->key.length > 0) {
2691                         DPAA_SEC_ERR("No Memory for auth key");
2692                         return -ENOMEM;
2693                 }
2694                 session->auth_key.length = auth_xform->key.length;
2695                 memcpy(session->auth_key.data, auth_xform->key.data,
2696                                 auth_xform->key.length);
2697                 session->auth_alg = auth_xform->algo;
2698                 session->digest_length = auth_xform->digest_length;
2699         } else {
2700                 session->auth_key.data = NULL;
2701                 session->auth_key.length = 0;
2702                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2703         }
2704
2705         switch (session->auth_alg) {
2706         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2707                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2708                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2709                 break;
2710         case RTE_CRYPTO_AUTH_MD5_HMAC:
2711                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2712                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2713                 break;
2714         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2715                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2716                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2717                 if (session->digest_length != 16)
2718                         DPAA_SEC_WARN(
2719                         "Using sha256-hmac with a truncated digest length is "
2720                         "non-standard; it will not work with lookaside proto");
2721                 break;
2722         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2723                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2724                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2725                 break;
2726         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2727                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2728                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2729                 break;
2730         case RTE_CRYPTO_AUTH_AES_CMAC:
2731                 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2732                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2733                 break;
2734         case RTE_CRYPTO_AUTH_NULL:
2735                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2736                 break;
2737         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2738                 session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2739                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2740                 break;
2741         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2742         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2743         case RTE_CRYPTO_AUTH_SHA1:
2744         case RTE_CRYPTO_AUTH_SHA256:
2745         case RTE_CRYPTO_AUTH_SHA512:
2746         case RTE_CRYPTO_AUTH_SHA224:
2747         case RTE_CRYPTO_AUTH_SHA384:
2748         case RTE_CRYPTO_AUTH_MD5:
2749         case RTE_CRYPTO_AUTH_AES_GMAC:
2750         case RTE_CRYPTO_AUTH_KASUMI_F9:
2751         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2752         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2753                 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2754                               session->auth_alg);
2755                 return -ENOTSUP;
2756         default:
2757                 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2758                               session->auth_alg);
2759                 return -ENOTSUP;
2760         }
2761
2762         switch (session->cipher_alg) {
2763         case RTE_CRYPTO_CIPHER_AES_CBC:
2764                 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2765                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2766                 break;
2767         case RTE_CRYPTO_CIPHER_DES_CBC:
2768                 session->cipher_key.alg = OP_PCL_IPSEC_DES;
2769                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2770                 break;
2771         case RTE_CRYPTO_CIPHER_3DES_CBC:
2772                 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2773                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2774                 break;
2775         case RTE_CRYPTO_CIPHER_AES_CTR:
2776                 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2777                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2778                 if (session->dir == DIR_ENC) {
2779                         session->encap_pdb.ctr.ctr_initial = 0x00000001;
2780                         session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2781                 } else {
2782                         session->decap_pdb.ctr.ctr_initial = 0x00000001;
2783                         session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2784                 }
2785                 break;
2786         case RTE_CRYPTO_CIPHER_NULL:
2787                 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2788                 break;
2789         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2790         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2791         case RTE_CRYPTO_CIPHER_3DES_ECB:
2792         case RTE_CRYPTO_CIPHER_AES_ECB:
2793         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2794                 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2795                               session->cipher_alg);
2796                 return -ENOTSUP;
2797         default:
2798                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2799                               session->cipher_alg);
2800                 return -ENOTSUP;
2801         }
2802
2803         return 0;
2804 }
2805
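/*
 * Illustrative sketch, not compiled into the driver: a cipher+auth
 * transform chain that the mapping above accepts, selecting
 * OP_PCL_IPSEC_AES_CBC with OP_PCL_IPSEC_HMAC_SHA1_96. The key buffers are
 * application-supplied placeholders; digest_length 12 requests the 96-bit
 * truncation the lookaside protocol path expects.
 *
 *	static uint8_t cipher_key[16];
 *	static uint8_t auth_key[20];
 *
 *	struct rte_crypto_sym_xform auth_x = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.key = { .data = auth_key, .length = sizeof(auth_key) },
 *			.digest_length = 12,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher_x = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_x,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = cipher_key,
 *				 .length = sizeof(cipher_key) },
 *		},
 *	};
 */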
2806 static int
2807 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2808                            struct rte_security_session_conf *conf,
2809                            void *sess)
2810 {
2811         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2812         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2813         struct rte_crypto_auth_xform *auth_xform = NULL;
2814         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2815         struct rte_crypto_aead_xform *aead_xform = NULL;
2816         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2817         uint32_t i;
2818         int ret;
2819
2820         PMD_INIT_FUNC_TRACE();
2821
2822         memset(session, 0, sizeof(dpaa_sec_session));
2823         session->proto_alg = conf->protocol;
2824         session->ctxt = DPAA_SEC_IPSEC;
2825
2826         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2827                 session->dir = DIR_ENC;
2828         else
2829                 session->dir = DIR_DEC;
2830
2831         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2832                 cipher_xform = &conf->crypto_xform->cipher;
2833                 if (conf->crypto_xform->next)
2834                         auth_xform = &conf->crypto_xform->next->auth;
2835                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2836                                         ipsec_xform, session);
2837         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2838                 auth_xform = &conf->crypto_xform->auth;
2839                 if (conf->crypto_xform->next)
2840                         cipher_xform = &conf->crypto_xform->next->cipher;
2841                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2842                                         ipsec_xform, session);
2843         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2844                 aead_xform = &conf->crypto_xform->aead;
2845                 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2846                                         ipsec_xform, session);
2847         } else {
2848                 DPAA_SEC_ERR("XFORM not specified");
2849                 ret = -EINVAL;
2850                 goto out;
2851         }
2852         if (ret) {
2853                 DPAA_SEC_ERR("Failed to process xform");
2854                 goto out;
2855         }
2856
2857         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2858                 if (ipsec_xform->tunnel.type ==
2859                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2860                         session->ip4_hdr.ip_v = IPVERSION;
2861                         session->ip4_hdr.ip_hl = 5;
2862                         session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2863                                                 sizeof(session->ip4_hdr));
2864                         session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2865                         session->ip4_hdr.ip_id = 0;
2866                         session->ip4_hdr.ip_off = 0;
2867                         session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2868                         session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2869                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2870                                         IPPROTO_ESP : IPPROTO_AH;
2871                         session->ip4_hdr.ip_sum = 0;
2872                         session->ip4_hdr.ip_src =
2873                                         ipsec_xform->tunnel.ipv4.src_ip;
2874                         session->ip4_hdr.ip_dst =
2875                                         ipsec_xform->tunnel.ipv4.dst_ip;
2876                         session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2877                                                 (void *)&session->ip4_hdr,
2878                                                 sizeof(struct ip));
2879                         session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2880                 } else if (ipsec_xform->tunnel.type ==
2881                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2882                         session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2883                                 DPAA_IPv6_DEFAULT_VTC_FLOW |
2884                                 ((ipsec_xform->tunnel.ipv6.dscp <<
2885                                         RTE_IPV6_HDR_TC_SHIFT) &
2886                                         RTE_IPV6_HDR_TC_MASK) |
2887                                 ((ipsec_xform->tunnel.ipv6.flabel <<
2888                                         RTE_IPV6_HDR_FL_SHIFT) &
2889                                         RTE_IPV6_HDR_FL_MASK));
2890                         /* Payload length will be updated by HW */
2891                         session->ip6_hdr.payload_len = 0;
2892                         session->ip6_hdr.hop_limits =
2893                                         ipsec_xform->tunnel.ipv6.hlimit;
2894                         session->ip6_hdr.proto = (ipsec_xform->proto ==
2895                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2896                                         IPPROTO_ESP : IPPROTO_AH;
2897                         memcpy(&session->ip6_hdr.src_addr,
2898                                         &ipsec_xform->tunnel.ipv6.src_addr, 16);
2899                         memcpy(&session->ip6_hdr.dst_addr,
2900                                         &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2901                         session->encap_pdb.ip_hdr_len =
2902                                                 sizeof(struct rte_ipv6_hdr);
2903                 }
2904                 session->encap_pdb.options =
2905                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2906                         PDBOPTS_ESP_OIHI_PDB_INL |
2907                         PDBOPTS_ESP_IVSRC |
2908                         PDBHMO_ESP_ENCAP_DTTL |
2909                         PDBHMO_ESP_SNR;
2910                 if (ipsec_xform->options.esn)
2911                         session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2912                 session->encap_pdb.spi = ipsec_xform->spi;
2913
2914         } else if (ipsec_xform->direction ==
2915                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2916                 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2917                         session->decap_pdb.options = sizeof(struct ip) << 16;
2918                 else
2919                         session->decap_pdb.options =
2920                                         sizeof(struct rte_ipv6_hdr) << 16;
2921                 if (ipsec_xform->options.esn)
2922                         session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2923                 if (ipsec_xform->replay_win_sz) {
2924                         uint32_t win_sz;
2925                         win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2926
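                        /* rte_align32pow2() rounds the requested window up
                         * to the next power of two. CAAM supports 32, 64 and
                         * 128 entry anti-replay windows, so anything larger
                         * than 64 falls through to the 128-entry option
                         * below.
                         */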
2927                         switch (win_sz) {
2928                         case 1:
2929                         case 2:
2930                         case 4:
2931                         case 8:
2932                         case 16:
2933                         case 32:
2934                                 session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
2935                                 break;
2936                         case 64:
2937                                 session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
2938                                 break;
2939                         default:
2940                                 session->decap_pdb.options |=
2941                                                         PDBOPTS_ESP_ARS128;
2942                         }
2943                 }
2944         } else {
2945                 ret = -EINVAL;
2946                 goto out;
2947         }
2946         rte_spinlock_lock(&internals->lock);
2947         for (i = 0; i < MAX_DPAA_CORES; i++) {
2948                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2949                 if (session->inq[i] == NULL) {
2950                         DPAA_SEC_ERR("unable to attach sec queue");
2951                         rte_spinlock_unlock(&internals->lock);
2952                         ret = -EBUSY;
2953                         goto out;
2953                 }
2954         }
2955         rte_spinlock_unlock(&internals->lock);
2956
2957         return 0;
2958 out:
2959         free_session_data(session);
2960         return ret;
2961 }
2962
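/*
 * Illustrative sketch of the rte_security_session_conf consumed above, for
 * an outbound ESP tunnel; the SPI, addresses and the crypto_xform chain are
 * placeholders (field names follow the DPDK release this driver tracks).
 * With such a conf, the code above prefills session->ip4_hdr and inlines it
 * into the encap PDB via PDBOPTS_ESP_OIHI_PDB_INL.
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1000,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.tunnel = {
 *				.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
 *				.ipv4 = {
 *					.src_ip.s_addr = rte_cpu_to_be_32(
 *						RTE_IPV4(192, 168, 1, 1)),
 *					.dst_ip.s_addr = rte_cpu_to_be_32(
 *						RTE_IPV4(192, 168, 2, 1)),
 *					.ttl = 64,
 *				},
 *			},
 *		},
 *		.crypto_xform = &cipher_x,
 *	};
 */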
2963 static int
2964 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2965                           struct rte_security_session_conf *conf,
2966                           void *sess)
2967 {
2968         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2969         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2970         struct rte_crypto_auth_xform *auth_xform = NULL;
2971         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2972         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2973         struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2974         uint32_t i;
2975         int ret;
2976
2977         PMD_INIT_FUNC_TRACE();
2978
2979         memset(session, 0, sizeof(dpaa_sec_session));
2980
2981         /* find xfrm types */
2982         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2983                 cipher_xform = &xform->cipher;
2984                 if (xform->next != NULL)
2985                         auth_xform = &xform->next->auth;
2986         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2987                 auth_xform = &xform->auth;
2988                 if (xform->next != NULL)
2989                         cipher_xform = &xform->next->cipher;
2990         } else {
2991                 DPAA_SEC_ERR("Invalid crypto type");
2992                 return -EINVAL;
2993         }
2994
2995         session->proto_alg = conf->protocol;
2996         session->ctxt = DPAA_SEC_PDCP;
2997
2998         if (cipher_xform) {
2999                 switch (cipher_xform->algo) {
3000                 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3001                         session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
3002                         break;
3003                 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3004                         session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
3005                         break;
3006                 case RTE_CRYPTO_CIPHER_AES_CTR:
3007                         session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
3008                         break;
3009                 case RTE_CRYPTO_CIPHER_NULL:
3010                         session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
3011                         break;
3012                 default:
3013                         DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
3014                                       cipher_xform->algo);
3015                         return -EINVAL;
3016                 }
3017
3018                 session->cipher_key.data = rte_zmalloc(NULL,
3019                                                cipher_xform->key.length,
3020                                                RTE_CACHE_LINE_SIZE);
3021                 if (session->cipher_key.data == NULL &&
3022                                 cipher_xform->key.length > 0) {
3023                         DPAA_SEC_ERR("No Memory for cipher key");
3024                         return -ENOMEM;
3025                 }
3026                 session->cipher_key.length = cipher_xform->key.length;
3027                 memcpy(session->cipher_key.data, cipher_xform->key.data,
3028                         cipher_xform->key.length);
3029                 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3030                                         DIR_ENC : DIR_DEC;
3031                 session->cipher_alg = cipher_xform->algo;
3032         } else {
3033                 session->cipher_key.data = NULL;
3034                 session->cipher_key.length = 0;
3035                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3036                 session->dir = DIR_ENC;
3037         }
3038
3039         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3040                 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
3041                     pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
3042                         DPAA_SEC_ERR(
3043                                 "PDCP Seq Num size should be 5/12 bits for control mode");
3044                         ret = -EINVAL;
3045                         goto out;
3046                 }
3047         }
3048
3049         if (auth_xform) {
3050                 switch (auth_xform->algo) {
3051                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3052                         session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
3053                         break;
3054                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3055                         session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
3056                         break;
3057                 case RTE_CRYPTO_AUTH_AES_CMAC:
3058                         session->auth_key.alg = PDCP_AUTH_TYPE_AES;
3059                         break;
3060                 case RTE_CRYPTO_AUTH_NULL:
3061                         session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
3062                         break;
3063                 default:
3064                         DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
3065                                       auth_xform->algo);
3066                         ret = -ENOTSUP;
3067                         goto out;
3068                 }
3069                 session->auth_key.data = rte_zmalloc(NULL,
3070                                                      auth_xform->key.length,
3071                                                      RTE_CACHE_LINE_SIZE);
3072                 if (!session->auth_key.data &&
3073                     auth_xform->key.length > 0) {
3074                         DPAA_SEC_ERR("No Memory for auth key");
3075                         rte_free(session->cipher_key.data);
3076                         return -ENOMEM;
3077                 }
3078                 session->auth_key.length = auth_xform->key.length;
3079                 memcpy(session->auth_key.data, auth_xform->key.data,
3080                        auth_xform->key.length);
3081                 session->auth_alg = auth_xform->algo;
3082         } else {
3083                 session->auth_key.data = NULL;
3084                 session->auth_key.length = 0;
3085                 session->auth_alg = 0;
3086         }
3087         session->pdcp.domain = pdcp_xform->domain;
3088         session->pdcp.bearer = pdcp_xform->bearer;
3089         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3090         session->pdcp.sn_size = pdcp_xform->sn_size;
3091         session->pdcp.hfn = pdcp_xform->hfn;
3092         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3093         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3094         session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
3095         if (cipher_xform)
3096                 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3097
3098         rte_spinlock_lock(&dev_priv->lock);
3099         for (i = 0; i < MAX_DPAA_CORES; i++) {
3100                 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3101                 if (session->inq[i] == NULL) {
3102                         DPAA_SEC_ERR("unable to attach sec queue");
3103                         rte_spinlock_unlock(&dev_priv->lock);
3104                         ret = -EBUSY;
3105                         goto out;
3106                 }
3107         }
3108         rte_spinlock_unlock(&dev_priv->lock);
3109         return 0;
3110 out:
3111         rte_free(session->auth_key.data);
3112         rte_free(session->cipher_key.data);
3113         memset(session, 0, sizeof(dpaa_sec_session));
3114         return ret;
3115 }
3116
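/*
 * Illustrative PDCP conf for the handler above: control plane, 12-bit
 * sequence numbers, downlink. The bearer and HFN values are placeholders,
 * and cipher_x stands for a cipher(+auth) chain built from the PDCP
 * algorithms handled in the switches above.
 *
 *	struct rte_security_session_conf pdcp_conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.bearer = 0x3,
 *			.domain = RTE_SECURITY_PDCP_MODE_CONTROL,
 *			.pkt_dir = RTE_SECURITY_PDCP_DOWNLINK,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *			.hfn = 0x1,
 *			.hfn_threshold = 0x70c0a,
 *		},
 *		.crypto_xform = &cipher_x,
 *	};
 */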
3117 static int
3118 dpaa_sec_security_session_create(void *dev,
3119                                  struct rte_security_session_conf *conf,
3120                                  struct rte_security_session *sess,
3121                                  struct rte_mempool *mempool)
3122 {
3123         void *sess_private_data;
3124         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3125         int ret;
3126
3127         if (rte_mempool_get(mempool, &sess_private_data)) {
3128                 DPAA_SEC_ERR("Couldn't get object from session mempool");
3129                 return -ENOMEM;
3130         }
3131
3132         switch (conf->protocol) {
3133         case RTE_SECURITY_PROTOCOL_IPSEC:
3134                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
3135                                 sess_private_data);
3136                 break;
3137         case RTE_SECURITY_PROTOCOL_PDCP:
3138                 ret = dpaa_sec_set_pdcp_session(cdev, conf,
3139                                 sess_private_data);
3140                 break;
3141         case RTE_SECURITY_PROTOCOL_MACSEC:
3142                 ret = -ENOTSUP;
3143                 break;
3144         default:
3145                 ret = -EINVAL;
3146                 break;
3145         }
3146         if (ret != 0) {
3147                 DPAA_SEC_ERR("failed to configure session parameters");
3148                 /* Return session to mempool */
3149                 rte_mempool_put(mempool, sess_private_data);
3150                 return ret;
3151         }
3152
3153         set_sec_session_private_data(sess, sess_private_data);
3154
3155         return ret;
3156 }
3157
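/*
 * Caller-side sketch, assuming a DPDK release where
 * rte_security_session_create() takes separate session and private-data
 * mempools; sess_mp and priv_mp are application-created pools and dev_id a
 * valid crypto device id. The security layer then invokes the
 * session_create op above with priv_mp as the "mempool" argument.
 *
 *	struct rte_security_ctx *sec_ctx =
 *			rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *sec_sess =
 *			rte_security_session_create(sec_ctx, &conf,
 *						    sess_mp, priv_mp);
 *	if (sec_sess == NULL)
 *		rte_exit(EXIT_FAILURE, "security session creation failed\n");
 */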
3158 /** Clear the memory of session so it doesn't leave key material behind */
3159 static int
3160 dpaa_sec_security_session_destroy(void *dev,
3161                 struct rte_security_session *sess)
3162 {
3163         void *sess_priv = get_sec_session_private_data(sess);
3164         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3165
3166         PMD_INIT_FUNC_TRACE();
3166
3167         if (sess_priv) {
3168                 free_session_memory((struct rte_cryptodev *)dev, s);
3169                 set_sec_session_private_data(sess, NULL);
3170         }
3171         return 0;
3172 }
3173 #endif

3174 static int
3175 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3176                        struct rte_cryptodev_config *config __rte_unused)
3177 {
3178         PMD_INIT_FUNC_TRACE();
3179
3180         return 0;
3181 }
3182
3183 static int
3184 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3185 {
3186         PMD_INIT_FUNC_TRACE();
3187         return 0;
3188 }
3189
3190 static void
3191 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3192 {
3193         PMD_INIT_FUNC_TRACE();
3194 }
3195
3196 static int
3197 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3198 {
3199         PMD_INIT_FUNC_TRACE();
3200
3201         if (dev == NULL)
3202                 return -ENODEV;
3203
3204         return 0;
3205 }
3206
3207 static void
3208 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3209                        struct rte_cryptodev_info *info)
3210 {
3211         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3212
3213         PMD_INIT_FUNC_TRACE();
3214         if (info != NULL) {
3215                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3216                 info->feature_flags = dev->feature_flags;
3217                 info->capabilities = dpaa_sec_capabilities;
3218                 info->sym.max_nb_sessions = internals->max_nb_sessions;
3219                 info->driver_id = cryptodev_driver_id;
3220         }
3221 }
3222
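/*
 * Minimal caller-side sketch: this getter backs rte_cryptodev_info_get(),
 * typically used to bound queue-pair setup; nb_lcores and nb_qps are
 * placeholder application variables.
 *
 *	struct rte_cryptodev_info info;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	nb_qps = RTE_MIN(nb_lcores, info.max_nb_queue_pairs);
 */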
3223 static enum qman_cb_dqrr_result
3224 dpaa_sec_process_parallel_event(void *event,
3225                         struct qman_portal *qm __always_unused,
3226                         struct qman_fq *outq,
3227                         const struct qm_dqrr_entry *dqrr,
3228                         void **bufs)
3229 {
3230         const struct qm_fd *fd;
3231         struct dpaa_sec_job *job;
3232         struct dpaa_sec_op_ctx *ctx;
3233         struct rte_event *ev = (struct rte_event *)event;
3234
3235         fd = &dqrr->fd;
3236
3237         /* sg is embedded in an op ctx,
3238          * sg[0] is for output,
3239          * sg[1] is for input
3240          */
3241         job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3242
3243         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3244         ctx->fd_status = fd->status;
3245         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3246                 struct qm_sg_entry *sg_out;
3247                 uint32_t len;
3248
3249                 sg_out = &job->sg[0];
3250                 hw_sg_to_cpu(sg_out);
3251                 len = sg_out->length;
3252                 ctx->op->sym->m_src->pkt_len = len;
3253                 ctx->op->sym->m_src->data_len = len;
3254         }
3255         if (!ctx->fd_status) {
3256                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3257         } else {
3258                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3259                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3260         }
3261         ev->event_ptr = (void *)ctx->op;
3262
3263         ev->flow_id = outq->ev.flow_id;
3264         ev->sub_event_type = outq->ev.sub_event_type;
3265         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3266         ev->op = RTE_EVENT_OP_NEW;
3267         ev->sched_type = outq->ev.sched_type;
3268         ev->queue_id = outq->ev.queue_id;
3269         ev->priority = outq->ev.priority;
3270         *bufs = (void *)ctx->op;
3271
3272         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3273
3274         return qman_cb_dqrr_consume;
3275 }
3276
3277 static enum qman_cb_dqrr_result
3278 dpaa_sec_process_atomic_event(void *event,
3279                         struct qman_portal *qm __rte_unused,
3280                         struct qman_fq *outq,
3281                         const struct qm_dqrr_entry *dqrr,
3282                         void **bufs)
3283 {
3284         u8 index;
3285         const struct qm_fd *fd;
3286         struct dpaa_sec_job *job;
3287         struct dpaa_sec_op_ctx *ctx;
3288         struct rte_event *ev = (struct rte_event *)event;
3289
3290         fd = &dqrr->fd;
3291
3292         /* sg is embedded in an op ctx,
3293          * sg[0] is for output,
3294          * sg[1] is for input
3295          */
3296         job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3297
3298         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3299         ctx->fd_status = fd->status;
3300         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3301                 struct qm_sg_entry *sg_out;
3302                 uint32_t len;
3303
3304                 sg_out = &job->sg[0];
3305                 hw_sg_to_cpu(sg_out);
3306                 len = sg_out->length;
3307                 ctx->op->sym->m_src->pkt_len = len;
3308                 ctx->op->sym->m_src->data_len = len;
3309         }
3310         if (!ctx->fd_status) {
3311                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3312         } else {
3313                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3314                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3315         }
3316         ev->event_ptr = (void *)ctx->op;
3317         ev->flow_id = outq->ev.flow_id;
3318         ev->sub_event_type = outq->ev.sub_event_type;
3319         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3320         ev->op = RTE_EVENT_OP_NEW;
3321         ev->sched_type = outq->ev.sched_type;
3322         ev->queue_id = outq->ev.queue_id;
3323         ev->priority = outq->ev.priority;
3324
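        /* Each DQRR entry is a 64-byte-aligned slot within the portal's
         * dequeue response ring, so (address >> 6) masked with the ring
         * size yields the slot index. The slot is held (not consumed)
         * until the application releases the atomic context for this
         * event.
         */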
3325         /* Save active dqrr entries */
3326         index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3327         DPAA_PER_LCORE_DQRR_SIZE++;
3328         DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3329         DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3330         ev->impl_opaque = index + 1;
3331         *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3332         *bufs = (void *)ctx->op;
3333
3334         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3335
3336         return qman_cb_dqrr_defer;
3337 }
3338
3339 int
3340 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3341                 int qp_id,
3342                 uint16_t ch_id,
3343                 const struct rte_event *event)
3344 {
3345         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3346         struct qm_mcc_initfq opts = {0};
3347
3348         int ret;
3349
3350         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3351                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3352         opts.fqd.dest.channel = ch_id;
3353
3354         switch (event->sched_type) {
3355         case RTE_SCHED_TYPE_ATOMIC:
3356                 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3357                 /* Clear the FQCTRL_AVOIDBLOCK bit, as it is not a
3358                  * valid combination with the HOLD_ACTIVE setting.
3359                  */
3360                 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3361                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3362                 break;
3363         case RTE_SCHED_TYPE_ORDERED:
3364                 DPAA_SEC_ERR("Ordered queue schedule type is not supported");
3365                 return -ENOTSUP;
3366         default:
3367                 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3368                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3369                 break;
3370         }
3371
3372         ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3373         if (unlikely(ret)) {
3374                 DPAA_SEC_ERR("unable to init caam source fq!");
3375                 return ret;
3376         }
3377
3378         memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3379
3380         return 0;
3381 }
3382
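/*
 * Illustrative sketch, normally issued by the DPAA event crypto adapter:
 * bind queue pair 0 of this device to an event channel with atomic
 * scheduling, so completions are delivered through
 * dpaa_sec_process_atomic_event(). cdev and ch_id are placeholders taken
 * from the adapter/eventdev configuration.
 *
 *	struct rte_event ev = {
 *		.queue_id = 0,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *
 *	ret = dpaa_sec_eventq_attach(cdev, 0, ch_id, &ev);
 */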
3383 int
3384 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3385                         int qp_id)
3386 {
3387         struct qm_mcc_initfq opts = {0};
3388         int ret;
3389         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3390
3391         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3392                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3393         qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3394         qp->outq.cb.ern  = ern_sec_fq_handler;
3395         qman_retire_fq(&qp->outq, NULL);
3396         qman_oos_fq(&qp->outq);
3397         ret = qman_init_fq(&qp->outq, 0, &opts);
3398         if (ret)
3399                 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3400         qp->outq.cb.dqrr = NULL;
3401
3402         return ret;
3403 }
3404
3405 static struct rte_cryptodev_ops crypto_ops = {
3406         .dev_configure        = dpaa_sec_dev_configure,
3407         .dev_start            = dpaa_sec_dev_start,
3408         .dev_stop             = dpaa_sec_dev_stop,
3409         .dev_close            = dpaa_sec_dev_close,
3410         .dev_infos_get        = dpaa_sec_dev_infos_get,
3411         .queue_pair_setup     = dpaa_sec_queue_pair_setup,
3412         .queue_pair_release   = dpaa_sec_queue_pair_release,
3413         .sym_session_get_size     = dpaa_sec_sym_session_get_size,
3414         .sym_session_configure    = dpaa_sec_sym_session_configure,
3415         .sym_session_clear        = dpaa_sec_sym_session_clear
3416 };
3417
3418 #ifdef RTE_LIB_SECURITY
3419 static const struct rte_security_capability *
3420 dpaa_sec_capabilities_get(void *device __rte_unused)
3421 {
3422         return dpaa_sec_security_cap;
3423 }
3424
3425 static const struct rte_security_ops dpaa_sec_security_ops = {
3426         .session_create = dpaa_sec_security_session_create,
3427         .session_update = NULL,
3428         .session_stats_get = NULL,
3429         .session_destroy = dpaa_sec_security_session_destroy,
3430         .set_pkt_metadata = NULL,
3431         .capabilities_get = dpaa_sec_capabilities_get
3432 };
3433 #endif

3434 static int
3435 dpaa_sec_uninit(struct rte_cryptodev *dev)
3436 {
3437         struct dpaa_sec_dev_private *internals;
3438
3439         if (dev == NULL)
3440                 return -ENODEV;
3441
3442         internals = dev->data->dev_private;
3443         rte_free(dev->security_ctx);
3444
3445         rte_free(internals);
3446
3447         DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3448                       dev->data->name, rte_socket_id());
3449
3450         return 0;
3451 }
3452
3453 static int
3454 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3455 {
3456         struct dpaa_sec_dev_private *internals;
3457 #ifdef RTE_LIB_SECURITY
3458         struct rte_security_ctx *security_instance;
3459 #endif
3460         struct dpaa_sec_qp *qp;
3461         uint32_t i, flags;
3462         int ret;
3463
3464         PMD_INIT_FUNC_TRACE();
3465
3466         cryptodev->driver_id = cryptodev_driver_id;
3467         cryptodev->dev_ops = &crypto_ops;
3468
3469         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3470         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3471         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3472                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
3473                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3474                         RTE_CRYPTODEV_FF_SECURITY |
3475                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3476                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3477                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3478                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3479                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3480
3481         internals = cryptodev->data->dev_private;
3482         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3483         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3484
3485         /*
3486          * For secondary processes, we don't initialise any further as primary
3487          * has already done this work. Only check we don't need a different
3488          * RX function
3489          */
3490         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3491                 DPAA_SEC_WARN("Device already initialized by primary process");
3492                 return 0;
3493         }
3494 #ifdef RTE_LIB_SECURITY
3495         /* Initialize security_ctx only for primary process*/
3496         security_instance = rte_malloc("rte_security_instances_ops",
3497                                 sizeof(struct rte_security_ctx), 0);
3498         if (security_instance == NULL)
3499                 return -ENOMEM;
3500         security_instance->device = (void *)cryptodev;
3501         security_instance->ops = &dpaa_sec_security_ops;
3502         security_instance->sess_cnt = 0;
3503         cryptodev->security_ctx = security_instance;
3504 #endif
3505         rte_spinlock_init(&internals->lock);
3506         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3507                 /* init qman fq for queue pair */
3508                 qp = &internals->qps[i];
3509                 ret = dpaa_sec_init_tx(&qp->outq);
3510                 if (ret) {
3511                         DPAA_SEC_ERR("config tx of queue pair %d failed", i);
3512                         goto init_error;
3513                 }
3514         }
3515
3516         flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3517                 QMAN_FQ_FLAG_TO_DCPORTAL;
3518         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3519                 /* create rx qman fq for sessions*/
3520                 ret = qman_create_fq(0, flags, &internals->inq[i]);
3521                 if (unlikely(ret != 0)) {
3522                         DPAA_SEC_ERR("sec qman_create_fq failed");
3523                         goto init_error;
3524                 }
3525         }
3526
3527         RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3528         return 0;
3529
3530 init_error:
3531         DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3532
3533         rte_free(cryptodev->security_ctx);
3534         return -EFAULT;
3535 }
3536
3537 static int
3538 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3539                                 struct rte_dpaa_device *dpaa_dev)
3540 {
3541         struct rte_cryptodev *cryptodev;
3542         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3543
3544         int retval;
3545
3546         snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3547
3548         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3549         if (cryptodev == NULL)
3550                 return -ENOMEM;
3551
3552         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3553                 cryptodev->data->dev_private = rte_zmalloc_socket(
3554                                         "cryptodev private structure",
3555                                         sizeof(struct dpaa_sec_dev_private),
3556                                         RTE_CACHE_LINE_SIZE,
3557                                         rte_socket_id());
3558
3559                 if (cryptodev->data->dev_private == NULL)
3560                         rte_panic("Cannot allocate memory for private "
3561                                         "device data");
3562         }
3563
3564         dpaa_dev->crypto_dev = cryptodev;
3565         cryptodev->device = &dpaa_dev->device;
3566
3567         /* init user callbacks */
3568         TAILQ_INIT(&(cryptodev->link_intr_cbs));
3569
3570         /* if sec device version is not configured */
3571         if (!rta_get_sec_era()) {
3572                 const struct device_node *caam_node;
3573
3574                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3575                         const uint32_t *prop = of_get_property(caam_node,
3576                                         "fsl,sec-era",
3577                                         NULL);
3578                         if (prop) {
3579                                 rta_set_sec_era(
3580                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3581                                 break;
3582                         }
3583                 }
3584         }
3585
3586         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3587                 retval = rte_dpaa_portal_init((void *)1);
3588                 if (retval) {
3589                         DPAA_SEC_ERR("Unable to initialize portal");
3590                         goto out;
3591                 }
3592         }
3593
3594         /* Invoke PMD device initialization function */
3595         retval = dpaa_sec_dev_init(cryptodev);
3596         if (retval == 0)
3597                 return 0;
3598
3599         retval = -ENXIO;
3600 out:
3601         /* In case of error, cleanup is done */
3602         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3603                 rte_free(cryptodev->data->dev_private);
3604
3605         rte_cryptodev_pmd_release_device(cryptodev);
3606
3607         return retval;
3608 }
3609
3610 static int
3611 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3612 {
3613         struct rte_cryptodev *cryptodev;
3614         int ret;
3615
3616         cryptodev = dpaa_dev->crypto_dev;
3617         if (cryptodev == NULL)
3618                 return -ENODEV;
3619
3620         ret = dpaa_sec_uninit(cryptodev);
3621         if (ret)
3622                 return ret;
3623
3624         return rte_cryptodev_pmd_destroy(cryptodev);
3625 }
3626
3627 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3628         .drv_type = FSL_DPAA_CRYPTO,
3629         .driver = {
3630                 .name = "DPAA SEC PMD"
3631         },
3632         .probe = cryptodev_dpaa_sec_probe,
3633         .remove = cryptodev_dpaa_sec_remove,
3634 };
3635
3636 static struct cryptodev_driver dpaa_sec_crypto_drv;
3637
3638 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3639 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3640                 cryptodev_driver_id);
3641 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);